2019-12-04 00:29:12 +08:00
|
|
|
#!/usr/bin/env python3
|
2005-12-02 01:34:21 +08:00
|
|
|
#
|
|
|
|
# This is the API builder, it parses the C sources and build the
|
|
|
|
# API formal description in XML.
|
|
|
|
#
|
|
|
|
# See Copyright for the status of this software.
|
|
|
|
#
|
|
|
|
# daniel@veillard.com
|
|
|
|
#
|
2018-03-15 17:30:03 +08:00
|
|
|
|
2018-03-20 14:48:45 +08:00
|
|
|
import os
|
|
|
|
import sys
|
2005-12-02 01:34:21 +08:00
|
|
|
import glob
|
2013-01-11 18:39:19 +08:00
|
|
|
import re
|
2020-06-25 20:31:59 +08:00
|
|
|
import argparse
|
2005-12-02 01:34:21 +08:00
|
|
|
|
2018-03-20 14:48:47 +08:00
|
|
|
# Global runtime state for the builder.
quiet = True      # suppress informational output (symbol dumps, stats) when True
warnings = 0      # running count of warnings emitted; bumped by index.warning()
debug = False     # enable extra debugging output
debugsym = None   # if set to a symbol name, trace every define/update/ref of it
|
2005-12-02 01:34:21 +08:00
|
|
|
|
|
|
|
#
|
|
|
|
# C parser analysis code
|
|
|
|
#
|
2007-08-14 13:57:07 +08:00
|
|
|
# Core public headers and implementation files to scan, mapped to a short
# human-readable description used in the generated API description.
included_files = {
    "libvirt-common.h": "header with general libvirt API definitions",
    "libvirt-domain.h": "header with general libvirt API definitions",
    "libvirt-domain-checkpoint.h": "header with general libvirt API definitions",
    "libvirt-domain-snapshot.h": "header with general libvirt API definitions",
    "libvirt-event.h": "header with general libvirt API definitions",
    "libvirt-host.h": "header with general libvirt API definitions",
    "libvirt-interface.h": "header with general libvirt API definitions",
    "libvirt-network.h": "header with general libvirt API definitions",
    "libvirt-nodedev.h": "header with general libvirt API definitions",
    "libvirt-nwfilter.h": "header with general libvirt API definitions",
    "libvirt-secret.h": "header with general libvirt API definitions",
    "libvirt-storage.h": "header with general libvirt API definitions",
    "libvirt-stream.h": "header with general libvirt API definitions",
    "virterror.h": "header with error specific API definitions",
    "libvirt.c": "Main interfaces for the libvirt library",
    "libvirt-domain.c": "Domain interfaces for the libvirt library",
    "libvirt-domain-checkpoint.c": "Domain checkpoint interfaces for the libvirt library",
    "libvirt-domain-snapshot.c": "Domain snapshot interfaces for the libvirt library",
    "libvirt-host.c": "Host interfaces for the libvirt library",
    "libvirt-interface.c": "Interface interfaces for the libvirt library",
    "libvirt-network.c": "Network interfaces for the libvirt library",
    "libvirt-nodedev.c": "Node device interfaces for the libvirt library",
    "libvirt-nwfilter.c": "NWFilter interfaces for the libvirt library",
    "libvirt-secret.c": "Secret interfaces for the libvirt library",
    "libvirt-storage.c": "Storage interfaces for the libvirt library",
    "libvirt-stream.c": "Stream interfaces for the libvirt library",
    "virerror.c": "implements error handling and reporting code for libvirt",
    "virevent.c": "event loop for monitoring file handles",
    "virtypedparam-public.c": "virTypedParameters APIs",
}
|
|
|
|
|
2011-09-09 18:55:21 +08:00
|
|
|
# QEMU-specific API sources (built into the separate libvirt-qemu library).
qemu_included_files = {
    "libvirt-qemu.h": "header with QEMU specific API definitions",
    "libvirt-qemu.c": "Implementations for the QEMU specific APIs",
}
|
|
|
|
|
Introduce an LXC specific public API & library
This patch introduces support for LXC specific public APIs. In
common with what was done for QEMU, this creates a libvirt_lxc.so
library and libvirt/libvirt-lxc.h header file.
The actual APIs are
int virDomainLxcOpenNamespace(virDomainPtr domain,
int **fdlist,
unsigned int flags);
int virDomainLxcEnterNamespace(virDomainPtr domain,
unsigned int nfdlist,
int *fdlist,
unsigned int *noldfdlist,
int **oldfdlist,
unsigned int flags);
which provide a way to use the setns() system call to move the
calling process into the container's namespace. It is not
practical to write in a generically applicable manner. The
nearest that we could get to such an API would be an API which
allows to pass a command + argv to be executed inside a
container. Even if we had such a generic API, this LXC specific
API is still useful, because it allows the caller to maintain
the current process context, in particular any I/O streams they
have open.
NB the virDomainLxcEnterNamespace() API is special in that it
runs client side, so does not involve the internal driver API.
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
2012-12-21 21:15:19 +08:00
|
|
|
# LXC-specific API sources (built into the separate libvirt-lxc library).
lxc_included_files = {
    "libvirt-lxc.h": "header with LXC specific API definitions",
    "libvirt-lxc.c": "Implementations for the LXC specific APIs",
}
|
|
|
|
|
2015-04-15 22:23:25 +08:00
|
|
|
# Admin API sources (built into the separate libvirt-admin library).
admin_included_files = {
    "libvirt-admin.h": "header with admin specific API definitions",
    "admin/libvirt-admin.c": "Implementations for the admin specific APIs",
}
|
|
|
|
|
2005-12-02 01:34:21 +08:00
|
|
|
# Tokens the C parser should skip over when encountered in declarations.
# Value tuple: (count, description) — presumably the number of following
# tokens to also skip (e.g. __declspec takes a parenthesized argument);
# TODO confirm against the parser's use of this table.
ignored_words = {
    "G_GNUC_UNUSED": (0, "macro keyword"),
    "G_GNUC_NULL_TERMINATED": (0, "macro keyword"),
    "VIR_DEPRECATED": (0, "macro keyword"),
    "VIR_EXPORT_VAR": (0, "macro keyword"),
    "WINAPI": (0, "Windows keyword"),
    "__declspec": (3, "Windows keyword"),
    "__stdcall": (0, "Windows keyword"),
}
|
|
|
|
|
2008-11-25 23:48:11 +08:00
|
|
|
# Functions deliberately excluded from the public API description,
# mapped to the reason for their exclusion (mostly internal migration
# plumbing and virtypedparam internals).
ignored_functions = {
    "virConnectSupportsFeature": "private function for remote access",
    "virDomainMigrateCheckNotLocal": "private function for migration",
    "virDomainMigrateFinish": "private function for migration",
    "virDomainMigrateFinish2": "private function for migration",
    "virDomainMigratePerform": "private function for migration",
    "virDomainMigratePrepare": "private function for migration",
    "virDomainMigratePrepare2": "private function for migration",
    "virDomainMigratePrepareTunnel": "private function for tunnelled migration",
    "virDomainMigrateBegin3": "private function for migration",
    "virDomainMigrateFinish3": "private function for migration",
    "virDomainMigratePerform3": "private function for migration",
    "virDomainMigratePrepare3": "private function for migration",
    "virDomainMigrateConfirm3": "private function for migration",
    "virDomainMigratePrepareTunnel3": "private function for tunnelled migration",
    "DllMain": "specific function for Win32",
    "virTypedParamsValidate": "internal function in virtypedparam.c",
    "virTypedParameterValidateSet": "internal function in virtypedparam.c",
    "virTypedParameterAssign": "internal function in virtypedparam.c",
    "virTypedParameterAssignFromStr": "internal function in virtypedparam.c",
    "virTypedParameterToString": "internal function in virtypedparam.c",
    "virTypedParamsCheck": "internal function in virtypedparam.c",
    "virTypedParamsCopy": "internal function in virtypedparam.c",
    "virDomainMigrateBegin3Params": "private function for migration",
    "virDomainMigrateFinish3Params": "private function for migration",
    "virDomainMigratePerform3Params": "private function for migration",
    "virDomainMigratePrepare3Params": "private function for migration",
    "virDomainMigrateConfirm3Params": "private function for migration",
    "virDomainMigratePrepareTunnel3Params": "private function for tunnelled migration",
    "virErrorCopyNew": "private",
}
|
|
|
|
|
2022-04-23 03:23:42 +08:00
|
|
|
# The version in the .sym file might different from
|
|
|
|
# the real version that the function was introduced.
|
|
|
|
# This dict's value is the correct version, as it should
|
|
|
|
# be in the docstrings.
|
|
|
|
# Overrides for the "version introduced" of specific functions: the .sym
# file version can differ from the real introduction version, and the
# value here is the correct one expected in the docstrings.
ignored_function_versions = {
    'virDomainSetBlockThreshold': '3.2.0',
    'virAdmServerUpdateTlsFiles': '6.2.0',
    'virDomainBlockPeek': '0.4.3',
    'virDomainMemoryPeek': '0.4.3',
}
|
|
|
|
|
2011-05-31 16:41:37 +08:00
|
|
|
# Macros excluded from the API description, mapped to the reason.
ignored_macros = {
    "_virSchedParameter": "backward compatibility macro for virTypedParameter",
    "_virBlkioParameter": "backward compatibility macro for virTypedParameter",
    "_virMemoryParameter": "backward compatibility macro for virTypedParameter",
}
|
|
|
|
|
2016-04-21 20:31:23 +08:00
|
|
|
# macros that should be completely skipped
|
|
|
|
# macros that should be completely skipped
hidden_macros = {
    "VIR_DEPRECATED": "internal macro to mark deprecated apis",
    "VIR_EXPORT_VAR": "internal macro to mark exported vars",
}
|
|
|
|
|
2019-09-24 20:55:56 +08:00
|
|
|
|
2005-12-02 01:34:21 +08:00
|
|
|
def escape(raw):
    """Escape the five XML-special characters in *raw* for XML output.

    '&' is substituted first so that the entities introduced by the
    later replacements are not themselves re-escaped.
    """
    substitutions = (
        ('&', '&amp;'),
        ('<', '&lt;'),
        ('>', '&gt;'),
        ("'", '&apos;'),
        ('"', '&quot;'),
    )
    for char, entity in substitutions:
        raw = raw.replace(char, entity)
    return raw
|
|
|
|
|
2019-09-24 20:55:56 +08:00
|
|
|
|
2005-12-02 01:34:21 +08:00
|
|
|
def uniq(items):
    """Return the distinct elements of *items* as a sorted list."""
    distinct = set(items)
    return sorted(distinct)
|
2005-12-02 01:34:21 +08:00
|
|
|
|
2019-09-24 20:55:56 +08:00
|
|
|
|
2005-12-02 01:34:21 +08:00
|
|
|
class identifier:
    """A single C symbol (function, macro, struct, enum, ...) found by the
    parser.

    Tracks where the symbol is declared (header), where it is implemented
    (module), its kind (type), its parsed description (info), extra payload
    (extra), the source line, a static flag and any preprocessor
    conditionals guarding the declaration.
    """

    def __init__(self, name, header=None, module=None, type=None, lineno=0,
                 info=None, extra=None, conditionals=None):
        self.name = name
        self.header = header
        self.module = module
        self.type = type
        self.info = info
        self.extra = extra
        self.lineno = lineno
        self.static = 0
        # Keep a private copy of the conditional list; normalize "no
        # conditionals" to None so callers can test with `is not None`.
        if conditionals is None or len(conditionals) == 0:
            self.conditionals = None
        else:
            self.conditionals = conditionals[:]
        # Tracing hook: report creation of the symbol being debugged.
        if self.name == debugsym and not quiet:
            print("=> define %s : %s" % (debugsym, (module, type, info,
                                                    extra, conditionals)))

    def __repr__(self):
        r = "%s %s:" % (self.type, self.name)
        if self.static:
            r = r + " static"
        if self.module is not None:
            r = r + " from %s" % self.module
        if self.info is not None:
            r = r + " " + repr(self.info)
        if self.extra is not None:
            r = r + " " + repr(self.extra)
        if self.conditionals is not None:
            r = r + " " + repr(self.conditionals)
        return r

    def set_header(self, header):
        self.header = header

    def set_module(self, module):
        self.module = module

    def set_type(self, type):
        self.type = type

    def set_info(self, info):
        self.info = info

    def set_extra(self, extra):
        self.extra = extra

    def set_lineno(self, lineno):
        self.lineno = lineno

    def set_static(self, static):
        self.static = static

    def set_conditionals(self, conditionals):
        # Same normalization as in __init__.
        if conditionals is None or len(conditionals) == 0:
            self.conditionals = None
        else:
            self.conditionals = conditionals[:]

    def get_name(self):
        return self.name

    def get_header(self):
        # BUG FIX: previously returned self.module, which made
        # get_header() indistinguishable from get_module().
        return self.header

    def get_module(self):
        return self.module

    def get_type(self):
        return self.type

    def get_info(self):
        return self.info

    def get_lineno(self):
        return self.lineno

    def get_extra(self):
        return self.extra

    def get_static(self):
        return self.static

    def get_conditionals(self):
        return self.conditionals

    def update(self, header, module, type=None, info=None, extra=None,
               conditionals=None):
        """Refresh this symbol with newly-parsed data.

        header/module/type are only filled in when missing (or, for
        module, when it still equals the header), while info, extra and
        conditionals always take the newest value when provided.
        """
        if self.name == debugsym and not quiet:
            print("=> update %s : %s" % (debugsym, (module, type, info,
                                                    extra, conditionals)))
        if header is not None and self.header is None:
            # BUG FIX: the old code passed `module` here, so a symbol that
            # gained a header recorded the module name as its header.
            self.set_header(header)
        if module is not None and (self.module is None or self.header == self.module):
            self.set_module(module)
        if type is not None and self.type is None:
            self.set_type(type)
        if info is not None:
            self.set_info(info)
        if extra is not None:
            self.set_extra(extra)
        if conditionals is not None:
            self.set_conditionals(conditionals)
|
2005-12-02 01:34:21 +08:00
|
|
|
|
2019-09-24 20:55:56 +08:00
|
|
|
|
2005-12-02 01:34:21 +08:00
|
|
|
class index:
    """Aggregate of every symbol extracted from the sources.

    Symbols are stored both in a flat `identifiers` map and in per-kind
    dictionaries (functions, macros, structs, ...) so they can be merged
    and emitted by category.
    """

    def __init__(self, name="noname"):
        self.name = name
        self.identifiers = {}
        self.functions = {}
        self.variables = {}
        self.includes = {}
        self.structs = {}
        self.unions = {}
        self.enums = {}
        self.typedefs = {}
        self.macros = {}
        self.references = {}
        self.info = {}

    def warning(self, msg):
        """Print *msg* and bump the global warning counter."""
        global warnings
        warnings = warnings + 1
        print(msg)

    def _store(self, name, header, module, static, type, lineno, info,
               extra, conditionals):
        """Fetch-or-create the identifier for *name* and refresh it.

        BUG FIX: the old code did `d.update(header, module, type, lineno,
        info, extra, conditionals)` inside a try block — update() takes no
        lineno argument, so the call always raised TypeError, which the
        broad `except Exception` silently turned into "replace the symbol
        with a brand-new identifier", discarding accumulated state such as
        the static flag. We now test membership explicitly and call
        update() with the correct arguments.
        """
        if name in self.identifiers:
            d = self.identifiers[name]
            d.update(header, module, type, info, extra, conditionals)
            d.set_lineno(lineno)  # keep the "latest occurrence" line number
        else:
            d = identifier(name, header, module, type, lineno, info, extra,
                           conditionals)
            self.identifiers[name] = d
        if static == 1:
            d.set_static(1)
        return d

    def add_ref(self, name, header, module, static, type, lineno, info=None, extra=None, conditionals=None):
        """Record a reference to symbol *name* (reserved __ names are
        ignored). Returns the identifier, or None if ignored."""
        if name[0:2] == '__':
            return None
        d = self._store(name, header, module, static, type, lineno, info,
                        extra, conditionals)

        if name is not None and type is not None:
            self.references[name] = d

        if name == debugsym and not quiet:
            print("New ref: %s" % (d))

        return d

    def add(self, name, header, module, static, type, lineno, info=None,
            extra=None, conditionals=None):
        """Record the definition of symbol *name* and file it under the
        dictionary matching its *type*. Returns the identifier, or None
        if the name is reserved (__ prefix)."""
        if name[0:2] == '__':
            return None
        d = self._store(name, header, module, static, type, lineno, info,
                        extra, conditionals)

        if name is not None and type is not None:
            type_map = {
                "function": self.functions,
                "functype": self.functions,
                "variable": self.variables,
                "include": self.includes,
                "struct": self.structs,
                "union": self.unions,
                "enum": self.enums,
                "typedef": self.typedefs,
                "macro": self.macros
            }
            if type in type_map:
                type_map[type][name] = d
            else:
                self.warning("Unable to register type %s" % type)

        if name == debugsym and not quiet:
            print("New symbol: %s" % (d))

        return d

    def merge(self, idx):
        """Merge another index *idx* into this one, warning about
        redeclarations of already-known symbols."""
        for sym in idx.functions.keys():
            #
            # macro might be used to override functions or variables
            # definitions
            #
            if sym in self.macros:
                del self.macros[sym]
            if sym in self.functions:
                self.warning("function %s from %s redeclared in %s" % (
                    sym, self.functions[sym].header, idx.functions[sym].header))
            else:
                self.functions[sym] = idx.functions[sym]
                self.identifiers[sym] = idx.functions[sym]
        for sym in idx.variables.keys():
            # macros may override variable definitions too
            if sym in self.macros:
                del self.macros[sym]
            if sym in self.variables:
                self.warning("variable %s from %s redeclared in %s" % (
                    sym, self.variables[sym].header, idx.variables[sym].header))
            else:
                self.variables[sym] = idx.variables[sym]
                self.identifiers[sym] = idx.variables[sym]
        for sym in idx.structs.keys():
            if sym in self.structs:
                self.warning("struct %s from %s redeclared in %s" % (
                    sym, self.structs[sym].header, idx.structs[sym].header))
            else:
                self.structs[sym] = idx.structs[sym]
                self.identifiers[sym] = idx.structs[sym]
        for sym in idx.unions.keys():
            if sym in self.unions:
                # CONSISTENCY FIX: this branch used a bare print() while
                # every sibling branch goes through self.warning(), so
                # union redeclarations did not bump the warning counter.
                self.warning("union %s from %s redeclared in %s" % (
                    sym, self.unions[sym].header, idx.unions[sym].header))
            else:
                self.unions[sym] = idx.unions[sym]
                self.identifiers[sym] = idx.unions[sym]
        for sym in idx.typedefs.keys():
            if sym in self.typedefs:
                self.warning("typedef %s from %s redeclared in %s" % (
                    sym, self.typedefs[sym].header, idx.typedefs[sym].header))
            else:
                self.typedefs[sym] = idx.typedefs[sym]
                self.identifiers[sym] = idx.typedefs[sym]
        for sym in idx.macros.keys():
            #
            # macro might be used to override functions or variables
            # definitions
            #
            if sym in self.variables:
                continue
            if sym in self.functions:
                continue
            if sym in self.enums:
                continue
            if sym in self.macros:
                self.warning("macro %s from %s redeclared in %s" % (
                    sym, self.macros[sym].header, idx.macros[sym].header))
            else:
                self.macros[sym] = idx.macros[sym]
                self.identifiers[sym] = idx.macros[sym]
        for sym in idx.enums.keys():
            if sym in self.enums:
                self.warning("enum %s from %s redeclared in %s" % (
                    sym, self.enums[sym].header, idx.enums[sym].header))
            else:
                self.enums[sym] = idx.enums[sym]
                self.identifiers[sym] = idx.enums[sym]

    def merge_public(self, idx):
        """Merge implementation data from *idx* into symbols already
        declared in the public headers, checking that preprocessor
        conditions agree between header and implementation."""
        for sym in idx.functions.keys():
            if sym in self.functions:
                up = idx.functions[sym]
                # check that function condition agrees with header
                if up.conditionals != self.functions[sym].conditionals:
                    self.warning("Header condition differs from Function"
                                 " for %s:" % sym)
                    self.warning("  H: %s" % self.functions[sym].conditionals)
                    self.warning("  C: %s" % up.conditionals)
                self.functions[sym].update(None, up.module, up.type, up.info,
                                           up.extra)
        # TODO: do the same for variables.

    def analyze_dict(self, type, dict):
        """Print a count of total vs public (non-static) entries in *dict*,
        labeled with *type*."""
        count = len(dict)
        public = sum(1 for entry in dict.values() if entry.static == 0)
        if count != public:
            print("  %d %s , %d public" % (count, type, public))
        elif count != 0:
            print("  %d public %s" % (count, type))

    def analyze(self):
        """Print per-category statistics unless running quietly."""
        if not quiet:
            self.analyze_dict("functions", self.functions)
            self.analyze_dict("variables", self.variables)
            self.analyze_dict("structs", self.structs)
            self.analyze_dict("unions", self.unions)
            self.analyze_dict("typedefs", self.typedefs)
            self.analyze_dict("macros", self.macros)
|
2008-02-06 03:27:37 +08:00
|
|
|
|
2019-09-24 20:55:56 +08:00
|
|
|
|
2005-12-02 01:34:21 +08:00
|
|
|
class CLexer:
    """A lexer for the C language, tokenize the input by reading and
       analyzing it line by line"""
    def __init__(self, input):
        # input: an open file-like object providing readline().
        self.input = input
        self.tokens = []   # pushed-back / pending tokens, consumed FIFO
        self.line = ""     # unconsumed remainder of the current source line
        self.lineno = 0    # current line number in the input

    def getline(self):
        """Read the next non-empty line, joining backslash-continued
        lines into one. Returns the stripped line, or None on EOF."""
        line = ''
        while line == '':
            line = self.input.readline()
            if not line:
                return None
            self.lineno += 1
            line = line.strip()
            if line == '':
                continue
            # Fold line continuations ('\' at end of line) into one line.
            while line[-1] == '\\':
                line = line[:-1]
                n = self.input.readline().strip()
                self.lineno += 1
                if not n:
                    break
                line += n
        return line

    def getlineno(self):
        """Return the line number of the most recently read line."""
        return self.lineno

    def push(self, token):
        """Push *token* back so it is returned by the next token() call."""
        self.tokens.insert(0, token)

    def debug(self):
        """Dump the lexer state for debugging."""
        print("Last token: ", self.last)
        print("Token queue: ", self.tokens)
        print("Line %d end: " % self.lineno, self.line)

    def token(self):
        """Return the next token as a (kind, value) tuple.

        Kinds: 'preproc' (preprocessor directive words), 'string',
        'comment', 'name', 'sep' (single separator char) and 'op'
        (operator run). Returns None at end of input.
        """
        while self.tokens == []:
            # Refill from the pending remainder first, then from input.
            if self.line == "":
                line = self.getline()
            else:
                line = self.line
                self.line = ""
            if line is None:
                return None

            if line[0] == '#':
                # Preprocessor line: each whitespace-separated word
                # becomes its own 'preproc' token.
                self.tokens = [('preproc', word) for word in line.split()]

                # We might have whitespace between the '#' and preproc
                # macro name, so instead of having a single token element
                # of '#define' we might end up with '#' and 'define'. This
                # merges them back together
                if self.tokens[0][1] == "#":
                    self.tokens[0] = ('preproc', "#" + self.tokens[1][1])
                    del self.tokens[1]

                # Re-join a function-like macro name with its argument
                # list, e.g. '#define FOO(a, b)' split on whitespace.
                if self.tokens[0][1] == "#define" and "(" in self.tokens[1][1]:
                    newtokens = [self.tokens[0]]

                    endArg = self.tokens[1][1].find(")")
                    if endArg != -1:
                        # Arg list closed within the same word: split any
                        # trailing text after ')' into its own token.
                        extra = self.tokens[1][1][endArg + 1:]
                        name = self.tokens[1][1][0:endArg + 1]
                        newtokens.append(('preproc', name))
                        if extra != "":
                            newtokens.append(('preproc', extra))
                    else:
                        # Arg list spans several words: accumulate until
                        # the closing ')'.
                        name = self.tokens[1][1]
                        for token in self.tokens[2:]:
                            if name is not None:
                                name = name + token[1]
                                if ")" in token[1]:
                                    newtokens.append(('preproc', name))
                                    name = None
                            else:
                                newtokens.append(token)
                    self.tokens = newtokens
                break
            nline = len(line)
            if line[0] == '"' or line[0] == "'":
                # String/char literal: pull in further lines until the
                # closing quote is found.
                quote = line[0]
                i = 1
                while quote not in line[i:]:
                    i = len(line)
                    nextline = self.getline()
                    if nextline is None:
                        return None
                    line += nextline

                tok, self.line = line[1:].split(quote, 1)
                self.last = ('string', tok)
                return self.last

            if line.startswith("/*"):
                # Block comment: collect lines until '*/'.
                line = line[2:]
                found = 0
                tok = ""
                while found == 0:
                    i = 0
                    nline = len(line)
                    while i < nline:
                        if line[i] == '*' and i + 1 < nline and line[i + 1] == '/':
                            self.line = line[i + 2:]
                            # NOTE(review): line[:i - 1] drops the character
                            # just before the '*' as well (and is line[:-1]
                            # when i == 0) — looks like an off-by-one;
                            # confirm intent before changing.
                            line = line[:i - 1]
                            nline = i
                            found = 1
                            break
                        i = i + 1
                    if tok != "":
                        tok = tok + "\n"
                    tok = tok + line
                    if found == 0:
                        line = self.getline()
                        if line is None:
                            return None
                self.last = ('comment', tok)
                return self.last
            if line.startswith("//"):
                # Line comment through end of line.
                line = line[2:]
                self.last = ('comment', line)
                return self.last
            # Cut the line at the first comment or string start and stash
            # the remainder for the next token() call.
            i = 0
            while i < nline:
                if line[i] == '/' and i + 1 < nline and line[i + 1] == '/':
                    self.line = line[i:]
                    line = line[:i]
                    break
                if line[i] == '/' and i + 1 < nline and line[i + 1] == '*':
                    self.line = line[i:]
                    line = line[:i]
                    break
                if line[i] == '"' or line[i] == "'":
                    self.line = line[i:]
                    line = line[:i]
                    break
                i = i + 1
            nline = len(line)
            # Tokenize what is left of the line into names, separators
            # and operators.
            i = 0
            while i < nline:
                if line[i] == ' ' or line[i] == '\t':
                    i = i + 1
                    continue
                if line[i].isalnum():
                    # Identifier / number: runs until a separator or
                    # operator character.
                    s = i
                    while i < nline:
                        if line[i] not in " \t(){}:;,+-*/%&!|[]=><":
                            i = i + 1
                        else:
                            break
                    self.tokens.append(('name', line[s:i]))
                    continue
                if line[i] in "(){}:;,[]":
                    self.tokens.append(('sep', line[i]))
                    i = i + 1
                    continue
                if line[i] in "+-*><=/%&!|.":
                    # '...' (varargs) is treated as a name token.
                    if line[i] == '.' and i + 2 < nline and \
                       line[i + 1] == '.' and line[i + 2] == '.':
                        self.tokens.append(('name', '...'))
                        i = i + 3
                        continue

                    # Greedily join a run of operator characters into a
                    # single 'op' token (e.g. '>>=', '&&').
                    j = i
                    while (j + 1) < nline and line[j + 1] in "+-*><=/%&!|":
                        j = j + 1

                    self.tokens.append(('op', line[i:j + 1]))
                    i = j + 1
                    continue
                # Anything else (e.g. characters not covered above) also
                # becomes a 'name' token.
                s = i
                while i < nline:
                    if line[i] not in " \t(){}:;,+-*/%&!|[]=><":
                        i = i + 1
                    else:
                        break
                self.tokens.append(('name', line[s:i]))

        tok = self.tokens[0]
        self.tokens = self.tokens[1:]
        self.last = tok
        return tok
|
2008-02-06 03:27:37 +08:00
|
|
|
|
2019-09-24 20:55:56 +08:00
|
|
|
|
2005-12-02 01:34:21 +08:00
|
|
|
class CParser:
|
|
|
|
"""The C module parser"""
|
2018-03-20 14:48:54 +08:00
|
|
|
def __init__(self, filename, idx=None):
|
2005-12-02 01:34:21 +08:00
|
|
|
self.filename = filename
|
2011-02-16 23:57:50 +08:00
|
|
|
if len(filename) > 2 and filename[-2:] == '.h':
|
|
|
|
self.is_header = 1
|
|
|
|
else:
|
|
|
|
self.is_header = 0
|
2005-12-02 01:34:21 +08:00
|
|
|
self.input = open(filename)
|
2011-02-16 23:57:50 +08:00
|
|
|
self.lexer = CLexer(self.input)
|
2013-08-22 17:16:03 +08:00
|
|
|
if idx is None:
|
2011-02-16 23:57:50 +08:00
|
|
|
self.index = index()
|
|
|
|
else:
|
|
|
|
self.index = idx
|
|
|
|
self.top_comment = ""
|
|
|
|
self.last_comment = ""
|
|
|
|
self.comment = None
|
|
|
|
self.collect_ref = 0
|
|
|
|
self.no_error = 0
|
|
|
|
self.conditionals = []
|
|
|
|
self.defines = []
|
2005-12-02 01:34:21 +08:00
|
|
|
|
|
|
|
    def collect_references(self):
        """Enable recording of symbol references while parsing bodies."""
        self.collect_ref = 1
|
|
|
|
|
|
|
|
    def stop_error(self):
        """Suppress further warnings/errors (used for DOC_DISABLE sections)."""
        self.no_error = 1
|
|
|
|
|
|
|
|
    def start_error(self):
        """Re-enable warnings/errors (used for DOC_ENABLE sections)."""
        self.no_error = 0
|
|
|
|
|
|
|
|
    def lineno(self):
        """Return the lexer's current line number in the input file."""
        return self.lexer.getlineno()
|
|
|
|
|
2018-03-20 14:48:54 +08:00
|
|
|
def index_add(self, name, module, static, type, info=None, extra=None):
|
2011-02-16 23:57:50 +08:00
|
|
|
if self.is_header == 1:
|
|
|
|
self.index.add(name, module, module, static, type, self.lineno(),
|
|
|
|
info, extra, self.conditionals)
|
|
|
|
else:
|
|
|
|
self.index.add(name, None, module, static, type, self.lineno(),
|
|
|
|
info, extra, self.conditionals)
|
2005-12-02 01:34:21 +08:00
|
|
|
|
|
|
|
def index_add_ref(self, name, module, static, type, info=None,
|
2018-03-20 14:48:54 +08:00
|
|
|
extra=None):
|
2011-02-16 23:57:50 +08:00
|
|
|
if self.is_header == 1:
|
|
|
|
self.index.add_ref(name, module, module, static, type,
|
|
|
|
self.lineno(), info, extra, self.conditionals)
|
|
|
|
else:
|
|
|
|
self.index.add_ref(name, None, module, static, type, self.lineno(),
|
|
|
|
info, extra, self.conditionals)
|
2005-12-02 01:34:21 +08:00
|
|
|
|
|
|
|
def warning(self, msg):
|
2011-05-12 18:19:42 +08:00
|
|
|
global warnings
|
|
|
|
warnings = warnings + 1
|
2005-12-02 01:34:21 +08:00
|
|
|
if self.no_error:
|
2011-02-16 23:57:50 +08:00
|
|
|
return
|
2018-03-15 17:30:03 +08:00
|
|
|
print(msg)
|
2005-12-02 01:34:21 +08:00
|
|
|
|
|
|
|
def error(self, msg, token=-1):
|
|
|
|
if self.no_error:
|
2011-02-16 23:57:50 +08:00
|
|
|
return
|
2005-12-02 01:34:21 +08:00
|
|
|
|
2018-03-15 17:30:03 +08:00
|
|
|
print("Parse Error: " + msg)
|
2011-02-16 23:57:50 +08:00
|
|
|
if token != -1:
|
2018-03-15 17:30:03 +08:00
|
|
|
print("Got token ", token)
|
2011-02-16 23:57:50 +08:00
|
|
|
self.lexer.debug()
|
|
|
|
sys.exit(1)
|
2005-12-02 01:34:21 +08:00
|
|
|
|
|
|
|
def debug(self, msg, token=-1):
|
2018-03-15 17:30:03 +08:00
|
|
|
print("Debug: " + msg)
|
2011-02-16 23:57:50 +08:00
|
|
|
if token != -1:
|
2018-03-15 17:30:03 +08:00
|
|
|
print("Got token ", token)
|
2011-02-16 23:57:50 +08:00
|
|
|
self.lexer.debug()
|
2005-12-02 01:34:21 +08:00
|
|
|
|
|
|
|
def parseTopComment(self, comment):
|
2011-02-16 23:57:50 +08:00
|
|
|
res = {}
|
2018-03-15 17:42:44 +08:00
|
|
|
lines = comment.split("\n")
|
2011-02-16 23:57:50 +08:00
|
|
|
item = None
|
|
|
|
for line in lines:
|
2013-01-10 23:02:23 +08:00
|
|
|
line = line.lstrip().lstrip('*').lstrip()
|
2013-01-11 18:39:19 +08:00
|
|
|
|
2019-09-24 20:29:27 +08:00
|
|
|
m = re.match(r'([_.a-zA-Z0-9]+):(.*)', line)
|
2013-01-11 18:39:19 +08:00
|
|
|
if m:
|
|
|
|
item = m.group(1)
|
|
|
|
line = m.group(2).lstrip()
|
|
|
|
|
2022-11-03 18:07:12 +08:00
|
|
|
# don't include the Copyright in the last 'item'
|
|
|
|
if line.startswith("Copyright (C)"):
|
|
|
|
# truncate any whitespace originating from newlines
|
|
|
|
# before the Copyright
|
|
|
|
if item:
|
|
|
|
res[item] = res[item].rstrip()
|
|
|
|
break
|
|
|
|
|
2013-01-11 18:39:19 +08:00
|
|
|
if item:
|
2018-03-15 17:39:49 +08:00
|
|
|
if item in res:
|
2011-02-16 23:57:50 +08:00
|
|
|
res[item] = res[item] + " " + line
|
|
|
|
else:
|
|
|
|
res[item] = line
|
|
|
|
self.index.info = res
|
2005-12-02 01:34:21 +08:00
|
|
|
|
2011-06-20 11:25:34 +08:00
|
|
|
def strip_lead_star(self, line):
|
2018-03-20 14:49:01 +08:00
|
|
|
if line.lstrip().startswith('*'):
|
|
|
|
line = line.replace('*', '', 1)
|
2011-06-20 11:25:34 +08:00
|
|
|
return line
|
|
|
|
|
2022-04-23 03:23:44 +08:00
|
|
|
def cleanup_code_comment(self, comment: str, type_name="") -> str:
|
2022-04-23 03:23:43 +08:00
|
|
|
if not isinstance(comment, str) or comment == "":
|
|
|
|
return ""
|
|
|
|
|
|
|
|
lines = comment.splitlines(True)
|
2022-04-23 03:23:44 +08:00
|
|
|
|
|
|
|
# If type_name is provided, check and remove header of
|
|
|
|
# the comment block.
|
|
|
|
if type_name != "" and f"{type_name}:" in lines[0]:
|
|
|
|
del lines[0]
|
|
|
|
|
2011-06-20 11:25:34 +08:00
|
|
|
com = ""
|
|
|
|
for line in lines:
|
|
|
|
com = com + self.strip_lead_star(line)
|
2022-04-23 03:23:43 +08:00
|
|
|
return com.strip()
|
|
|
|
|
|
|
|
    def cleanupComment(self):
        """Normalize self.comment in place via cleanup_code_comment()."""
        self.comment = self.cleanup_code_comment(self.comment)
|
2011-06-20 11:25:34 +08:00
|
|
|
|
2005-12-02 01:34:21 +08:00
|
|
|
    def parseComment(self, token):
        """Absorb a comment token into the parser state.

        The first comment seen becomes the file's top comment.  A
        comment starting with '*' begins a new documentation comment,
        replacing any pending one; otherwise the text is appended to
        the pending comment.  DOC_DISABLE/DOC_ENABLE markers inside the
        accumulated comment toggle warning/error suppression.

        Returns the next token from the lexer.
        """
        com = token[1]
        if self.top_comment == "":
            self.top_comment = com
        # '*' marks the start of a fresh documentation comment.
        if self.comment is None or com[0] == '*':
            self.comment = com
        else:
            self.comment = self.comment + com
        token = self.lexer.token()

        if self.comment.find("DOC_DISABLE") != -1:
            self.stop_error()

        if self.comment.find("DOC_ENABLE") != -1:
            self.start_error()

        return token
|
2005-12-02 01:34:21 +08:00
|
|
|
|
|
|
|
#
|
|
|
|
# Parse a comment block associate to a typedef
|
|
|
|
#
|
2018-03-20 14:49:02 +08:00
|
|
|
    #
    # Parse a comment block associate to a typedef
    #
    def parseTypeComment(self, name, quiet=False):
        """Extract the description text from the comment of type 'name'.

        Returns the description string, or None (warning unless
        'quiet') when the comment is missing or misformatted.  Names
        starting with '__' are always handled quietly.
        """
        if name[0:2] == '__':
            quiet = True

        if self.comment is None:
            if not quiet:
                self.warning("Missing comment for type %s" % name)
            return None
        if not self.comment.startswith('*'):
            if not quiet:
                self.warning("Missing * in type comment for %s" % name)
            return None

        lines = self.comment.split('\n')
        # Remove lines that contain only single asterisk
        lines[:] = [line for line in lines if line.strip() != '*']

        # The first remaining line must be exactly "* <name>:".
        if lines[0] != "* %s:" % name:
            if not quiet:
                self.warning("Misformatted type comment for %s" % name)
                self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
            return None
        del lines[0]

        # Concatenate all remaining lines by striping leading asterisks
        desc = " ".join([line.lstrip("*").strip() for line in lines]).strip()

        if not (quiet or desc):
            self.warning("Type comment for %s lack description of the macro"
                         % name)

        return desc
|
2019-09-24 20:55:56 +08:00
|
|
|
|
2005-12-02 01:34:21 +08:00
|
|
|
#
|
|
|
|
# Parse a comment block associate to a macro
|
|
|
|
#
|
2018-03-20 14:48:54 +08:00
|
|
|
    #
    # Parse a comment block associate to a macro
    #
    def parseMacroComment(self, name, quiet=0):
        """Parse the doc comment attached to macro 'name'.

        Returns (args, desc): args is a list of (name, description)
        tuples gathered from '* @param:' entries, desc is the overall
        description text.  Warnings are emitted unless 'quiet'; names
        starting with '__' or listed in ignored_macros never warn.
        """
        global ignored_macros

        if name[0:2] == '__':
            quiet = 1
        if name in ignored_macros:
            quiet = 1

        args = []
        desc = ""

        if self.comment is None:
            if not quiet:
                self.warning("Missing comment for macro %s" % name)
            return args, desc
        if self.comment[0] != '*':
            if not quiet:
                self.warning("Missing * in macro comment for %s" % name)
            return args, desc
        lines = self.comment.split('\n')
        if lines[0] == '*':
            del lines[0]
        # The first line must be exactly "* <name>:".
        if lines[0] != "* %s:" % name:
            if not quiet:
                self.warning("Misformatted macro comment for %s" % name)
                self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
            return args, desc
        del lines[0]
        # Skip blank '*' lines after the header.
        # NOTE(review): raises IndexError when the comment ends here --
        # presumably callers always provide a body; confirm.
        while lines[0] == '*':
            del lines[0]
        # Parse '* @arg: description' entries, each possibly followed
        # by continuation lines.
        while len(lines) > 0 and lines[0][0:3] == '* @':
            prefix = lines[0][3:]
            try:
                arg, desc = prefix.split(':', 1)
                desc = desc.strip()
                arg = arg.strip()
            except Exception:
                if not quiet:
                    self.warning("Misformatted macro comment for %s" % name)
                    self.warning(" problem with '%s'" % lines[0])
                del lines[0]
                continue
            del lines[0]
            # Pull in continuation lines until the next '@' entry.
            line = lines[0].strip()
            while len(line) > 2 and line[0:3] != '* @':
                while line[0] == '*':
                    line = line[1:]
                desc = desc + ' ' + line.strip()
                del lines[0]
                if len(lines) == 0:
                    break
                line = lines[0]
            args.append((arg, desc))
            while len(lines) > 0 and lines[0] == '*':
                del lines[0]
        # Whatever remains is the macro's own description.
        desc = ""
        while len(lines) > 0:
            line = lines[0]
            while len(line) > 0 and line[0] == '*':
                line = line[1:]
            line = line.strip()
            desc = desc + " " + line
            del lines[0]

        desc = desc.strip()

        if quiet == 0:
            if desc == "":
                self.warning("Macro comment for %s lack description of the macro" % name)

        return args, desc
|
2005-12-02 01:34:21 +08:00
|
|
|
|
2018-03-20 14:49:00 +08:00
|
|
|
#
|
|
|
|
# Parse a comment block and merge the information found in the
|
|
|
|
# parameters descriptions, finally returns a block as complete
|
|
|
|
# as possible
|
|
|
|
#
|
2018-03-20 14:48:54 +08:00
|
|
|
    #
    # Parse a comment block and merge the information found in the
    # parameters descriptions, finally returns a block as complete
    # as possible
    #
    def mergeFunctionComment(self, name, description, quiet=0):
        """Merge the doc comment of function 'name' into its signature.

        description is ((return_type, ...), args).  Returns
        ((return_type, return_desc), args, desc) where each arg tuple
        gains the description found in its '* @param:' entry and desc
        is the free-form body text; a "Returns" line starts the return
        value description.  Warnings are emitted unless 'quiet'; 'main',
        '__'-prefixed names and ignored_functions never warn.
        """
        global ignored_functions

        if name == 'main':
            quiet = 1
        if name[0:2] == '__':
            quiet = 1
        if name in ignored_functions:
            quiet = 1

        ret, args = description
        desc = ""
        retdesc = ""

        if self.comment is None:
            if not quiet:
                self.warning("Missing comment for function %s" % name)
            return (ret[0], retdesc), args, desc
        if self.comment[0] != '*':
            if not quiet:
                self.warning("Missing * in function comment for %s" % name)
            return (ret[0], retdesc), args, desc
        lines = self.comment.split('\n')
        if lines[0] == '*':
            del lines[0]
        if lines[0] != "* %s:" % name:
            if not quiet:
                self.warning("Misformatted function comment for %s" % name)
                self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
            return (ret[0], retdesc), args, desc
        del lines[0]
        while lines[0] == '*':
            del lines[0]
        nbargs = len(args)
        # Match each '* @arg: description' entry against the formal args.
        while len(lines) > 0 and lines[0][0:3] == '* @':
            prefix = lines[0][3:]
            try:
                arg, desc = prefix.split(':', 1)
                desc = desc.strip()
                arg = arg.strip()
            except Exception:
                if not quiet:
                    self.warning("Misformatted function comment for %s" % name)
                    self.warning(" problem with '%s'" % lines[0])
                del lines[0]
                continue
            del lines[0]
            # Pull in continuation lines until the next '@' entry.
            line = lines[0].strip()
            while len(line) > 2 and line[0:3] != '* @':
                while line[0] == '*':
                    line = line[1:]
                desc = desc + ' ' + line.strip()
                del lines[0]
                if len(lines) == 0:
                    break
                line = lines[0]
            # Attach the description to the matching formal argument.
            i = 0
            while i < nbargs:
                if args[i][1] == arg:
                    args[i] = (args[i][0], arg, desc)
                    break
                i = i + 1
            if i >= nbargs:
                if not quiet:
                    self.warning("Unable to find arg %s from function comment for %s" %
                                 (arg, name))
        while len(lines) > 0 and lines[0] == '*':
            del lines[0]
        desc = None
        # Collect the body of the comment; a "Returns" line switches to
        # collecting the return value description instead.
        while len(lines) > 0:
            line = lines[0]
            i = 0
            # Remove all leading '*', followed by at most one ' ' character
            # since we need to preserve correct indentation of code examples
            while i < len(line) and line[i] == '*':
                i = i + 1
            if i > 0:
                if i < len(line) and line[i] == ' ':
                    i = i + 1
                line = line[i:]
            # NOTE(review): 'len(line) >= 6' with a 7-char prefix looks
            # off-by-one, but slicing makes it harmless: a 6-char line
            # can never equal "Returns".
            if len(line) >= 6 and line[0:7] == "Returns":
                try:
                    line = line.split(' ', 1)[1]
                except Exception:
                    line = ""
                retdesc = line.strip()
                del lines[0]
                # Everything after "Returns" belongs to the return desc.
                while len(lines) > 0:
                    line = lines[0]
                    while len(line) > 0 and line[0] == '*':
                        line = line[1:]
                    line = line.strip()
                    retdesc = retdesc + " " + line
                    del lines[0]
            else:
                if desc is not None:
                    desc = desc + "\n" + line
                else:
                    desc = line
                del lines[0]

        if desc is None:
            desc = ""
        retdesc = retdesc.strip()
        desc = desc.strip()

        if quiet == 0:
            #
            # report missing comments
            #
            i = 0
            while i < nbargs:
                if args[i][2] is None and args[i][0] != "void" and args[i][1] is not None:
                    self.warning("Function comment for %s lacks description of arg %s" % (name, args[i][1]))
                i = i + 1
            if retdesc == "" and ret[0] != "void":
                self.warning("Function comment for %s lacks description of return value" % name)
            if desc == "":
                self.warning("Function comment for %s lacks description of the function" % name)

        return (ret[0], retdesc), args, desc
|
2005-12-02 01:34:21 +08:00
|
|
|
|
|
|
|
    def parsePreproc(self, token):
        """Handle a preprocessor token: #include, #define, conditionals.

        Includes and macros are recorded in the index; #if/#ifdef/
        #ifndef/#else/#endif maintain the conditional-nesting state
        (only conditions mentioning 'ENABLED' are tracked).  Returns
        the next token to process.
        """
        if debug:
            print("=> preproc ", token, self.lexer.tokens)
        name = token[1]
        if name == "#include":
            token = self.lexer.token()
            if token is None:
                return None
            if token[0] == 'preproc':
                self.index_add(token[1], self.filename, not self.is_header,
                               "include")
                return self.lexer.token()
            return token
        if name == "#define":
            token = self.lexer.token()
            if token is None:
                return None
            if token[0] == 'preproc':
                # TODO macros with arguments
                name = token[1]
                lst = []
                token = self.lexer.token()
                # gather the remainder of the macro definition line(s)
                while (token is not None and token[0] == 'preproc' and
                       token[1][0] != '#'):
                    lst.append(token[1])
                    token = self.lexer.token()

                # function-like macro: split "NAME(a,b)" into name/params
                paramStart = name.find("(")
                params = None
                if paramStart != -1:
                    params = name[paramStart + 1:-1]
                    name = name[0:paramStart]

                # skip hidden macros
                if name in hidden_macros:
                    return token
                # skip header include-guard style names
                if name[-2:] == "_H" or name[-8:] == "_H_ALLOW":
                    return token

                strValue = None
                rawValue = None
                # a single quoted token is recorded as a string value,
                # anything else is kept as the raw replacement text
                if len(lst) == 1 and lst[0][0] == '"' and lst[0][-1] == '"':
                    strValue = lst[0][1:-1]
                else:
                    rawValue = " ".join(lst)
                (args, desc) = self.parseMacroComment(name, not self.is_header)
                self.index_add(name, self.filename, not self.is_header,
                               "macro", (args, desc, params, strValue, rawValue))
                return token

        #
        # Processing of conditionals modified by Bill 1/1/05
        #
        # We process conditionals (i.e. tokens from #ifdef, #ifndef,
        # #if, #else and #endif) for headers and mainline code,
        # store the ones from the header in libxml2-api.xml, and later
        # (in the routine merge_public) verify that the two (header and
        # mainline code) agree.
        #
        # There is a small problem with processing the headers. Some of
        # the variables are not concerned with enabling / disabling of
        # library functions (e.g. '__XML_PARSER_H__'), and we don't want
        # them to be included in libxml2-api.xml, or involved in
        # the check between the header and the mainline code. To
        # accomplish this, we ignore any conditional which doesn't include
        # the string 'ENABLED'
        #
        if name == "#ifdef":
            apstr = self.lexer.tokens[0][1]
            try:
                self.defines.append(apstr)
                if apstr.find('ENABLED') != -1:
                    self.conditionals.append("defined(%s)" % apstr)
            except Exception:
                pass
        elif name == "#ifndef":
            apstr = self.lexer.tokens[0][1]
            try:
                self.defines.append(apstr)
                if apstr.find('ENABLED') != -1:
                    self.conditionals.append("!defined(%s)" % apstr)
            except Exception:
                pass
        elif name == "#if":
            # reassemble the whole condition expression from the tokens
            apstr = ""
            for tok in self.lexer.tokens:
                if apstr != "":
                    apstr = apstr + " "
                apstr = apstr + tok[1]
            try:
                self.defines.append(apstr)
                if apstr.find('ENABLED') != -1:
                    self.conditionals.append(apstr)
            except Exception:
                pass
        elif name == "#else":
            if (self.conditionals != [] and
                    self.defines[-1].find('ENABLED') != -1):
                self.conditionals[-1] = "!(%s)" % self.conditionals[-1]
        elif name == "#endif":
            if (self.conditionals != [] and
                    self.defines[-1].find('ENABLED') != -1):
                self.conditionals = self.conditionals[:-1]
            self.defines = self.defines[:-1]
        token = self.lexer.token()
        # swallow the rest of the preprocessor line
        while (token is not None and token[0] == 'preproc' and
               token[1][0] != '#'):
            token = self.lexer.token()
        return token
|
2005-12-02 01:34:21 +08:00
|
|
|
|
2018-03-20 14:49:00 +08:00
|
|
|
#
|
|
|
|
# token acquisition on top of the lexer, it handle internally
|
|
|
|
# preprocessor and comments since they are logically not part of
|
|
|
|
# the program structure.
|
|
|
|
#
|
2005-12-07 00:50:31 +08:00
|
|
|
    def push(self, tok):
        """Push a token back so the next token() call returns it."""
        self.lexer.push(tok)
|
|
|
|
|
2005-12-02 01:34:21 +08:00
|
|
|
    def token(self):
        """Return the next meaningful token, or None at end of input.

        Comments and preprocessor directives are consumed internally;
        '__const' is normalized to 'const', '__attribute' declarations
        are skipped up to the ';', and words listed in ignored_words
        are dropped along with the number of argument tokens recorded
        for them.
        """
        global ignored_words

        token = self.lexer.token()
        while token is not None:
            if token[0] == 'comment':
                token = self.parseComment(token)
                continue
            elif token[0] == 'preproc':
                token = self.parsePreproc(token)
                continue
            elif token[0] == "name" and token[1] == "__const":
                token = ("name", "const")
                return token
            elif token[0] == "name" and token[1] == "__attribute":
                # skip the whole attribute clause up to the next ';'
                token = self.lexer.token()
                while token is not None and token[1] != ";":
                    token = self.lexer.token()
                return token
            elif token[0] == "name" and token[1] in ignored_words:
                # drop the word plus its recorded argument token count
                (n, info) = ignored_words[token[1]]
                i = 0
                while i < n:
                    token = self.lexer.token()
                    i = i + 1
                token = self.lexer.token()
                continue
            else:
                if debug:
                    print("=> ", token)
                return token
        return None
|
2005-12-02 01:34:21 +08:00
|
|
|
|
2018-03-20 14:49:00 +08:00
|
|
|
#
|
|
|
|
# Parse a typedef, it records the type and its name.
|
|
|
|
#
|
2005-12-02 01:34:21 +08:00
|
|
|
    #
    # Parse a typedef, it records the type and its name.
    #
    def parseTypedef(self, token):
        """Parse a typedef declaration and index the introduced name(s).

        Function-pointer typedefs are indexed as 'functype', struct
        typedefs as 'struct', everything else as 'typedef'.  Handles
        comma-separated lists of names.  Returns the token following
        the declaration, or None on error.
        """
        if token is None:
            return None

        # With typedef enum types, we can have comments parsed before the
        # enum themselves. The parsing of enum values does clear the
        # self.comment variable. So we store it here for later.
        typedef_comment = self.comment

        token = self.parseType(token)
        if token is None:
            self.error("parsing typedef")
            return None
        base_type = self.type
        type = base_type
        # self.debug("end typedef type", token)
        while token is not None:
            if token[0] == "name":
                name = token[1]
                signature = self.signature
                if signature is not None:
                    # function pointer typedef
                    type = type.split('(')[0]
                    d = self.mergeFunctionComment(name,
                                                  ((type, None), signature), 1)
                    self.index_add(name, self.filename, not self.is_header,
                                   "functype", d)
                else:
                    if base_type == "struct":
                        self.index_add(name, self.filename, not self.is_header,
                                       "struct", type)
                        base_type = "struct " + name
                    else:
                        # restore the comment saved before parseType(),
                        # which may have consumed or cleared it
                        self.comment = typedef_comment
                        info = self.parseTypeComment(name, 1)
                        self.index_add(name, self.filename, not self.is_header,
                                       "typedef", type, info)
                token = self.token()
            else:
                self.error("parsing typedef: expecting a name")
                return token
            # self.debug("end typedef", token)
            if token is not None and token[0] == 'sep' and token[1] == ',':
                # comma-separated list: reset to the base type and skip
                # any pointer operators before the next name
                type = base_type
                token = self.token()
                while token is not None and token[0] == "op":
                    type = type + token[1]
                    token = self.token()
            elif token is not None and token[0] == 'sep' and token[1] == ';':
                break
            elif token is not None and token[0] == 'name':
                type = base_type
                continue
            else:
                self.error("parsing typedef: expecting ';'", token)
                return token
        token = self.token()
        return token
|
2008-02-06 03:27:37 +08:00
|
|
|
|
2018-03-20 14:49:00 +08:00
|
|
|
#
|
|
|
|
# Parse a C code block, used for functions it parse till
|
|
|
|
# the balancing } included
|
|
|
|
#
|
2005-12-02 01:34:21 +08:00
|
|
|
def parseBlock(self, token):
    """Skip a brace-delimited C code block (typically a function body).

    Consumes tokens up to and including the balancing '}'.  While
    scanning, if reference collection is enabled (self.collect_ref == 1),
    record uses of vir*/XEN_*/LIBXEN_* identifiers in the cross-reference
    index via index_add_ref.  Returns the first token after the block,
    or None at end of input.
    """
    while token is not None:
        if token[0] == "sep" and token[1] == "{":
            # Nested block: recurse past its matching '}'.
            token = self.token()
            token = self.parseBlock(token)
        elif token[0] == "sep" and token[1] == "}":
            # End of this block: drop any pending comment, return the
            # token that follows.
            self.comment = None
            token = self.token()
            return token
        else:
            if self.collect_ref == 1:
                oldtok = token
                token = self.token()
                if oldtok[0] == "name" and oldtok[1][0:3] == "vir":
                    if token[0] == "sep" and token[1] == "(":
                        # 'virXxx(' -> looks like a function call.
                        self.index_add_ref(oldtok[1], self.filename,
                                           0, "function")
                        token = self.token()
                    elif token[0] == "name":
                        token = self.token()
                        if token[0] == "sep" and (token[1] == ";" or
                                                  token[1] == "," or
                                                  token[1] == "="):
                            # 'virXxx name ;|,|=' -> a variable
                            # declaration using the type.
                            self.index_add_ref(oldtok[1], self.filename,
                                               0, "type")
                elif oldtok[0] == "name" and oldtok[1][0:4] == "XEN_":
                    self.index_add_ref(oldtok[1], self.filename,
                                       0, "typedef")
                elif oldtok[0] == "name" and oldtok[1][0:7] == "LIBXEN_":
                    self.index_add_ref(oldtok[1], self.filename,
                                       0, "typedef")
            else:
                token = self.token()
    return token
|
2005-12-02 01:34:21 +08:00
|
|
|
|
2018-03-20 14:49:00 +08:00
|
|
|
#
|
|
|
|
# Parse a C struct definition till the balancing }
|
|
|
|
#
|
2005-12-02 01:34:21 +08:00
|
|
|
def parseStruct(self, token):
    """Parse a C struct definition up to the balancing '}'.

    Collects the member descriptions into self.struct_fields as
    (type, name, comment) tuples — or (type, name, comment,
    union_fields) when the member is itself a union.  Relies on
    self.type / self.comment being maintained by parseType and the
    tokenizer.  Returns the first unconsumed token.
    """
    fields = []
    # self.debug("start parseStruct", token)
    while token is not None:
        if token[0] == "sep" and token[1] == "{":
            # Anonymous inner block: skip it wholesale.
            token = self.token()
            token = self.parseTypeBlock(token)
        elif token[0] == "sep" and token[1] == "}":
            # End of the struct body.
            self.struct_fields = fields
            # self.debug("end parseStruct", token)
            # print(fields)
            token = self.token()
            return token
        else:
            # A member declaration: save the outer type, parse the
            # member's type, then expect 'name ;'.
            base_type = self.type
            # self.debug("before parseType", token)
            token = self.parseType(token)
            # self.debug("after parseType", token)
            if token is not None and token[0] == "name":
                fname = token[1]
                token = self.token()
                if token[0] == "sep" and token[1] == ";":
                    self.comment = None
                    token = self.token()
                    self.cleanupComment()
                    if self.type == "union":
                        # Embedded union: attach its parsed members.
                        fields.append((self.type, fname, self.comment,
                                       self.union_fields))
                        self.union_fields = []
                    else:
                        fields.append((self.type, fname, self.comment))
                    self.comment = None
                else:
                    self.error("parseStruct: expecting ;", token)
            elif token is not None and token[0] == "sep" and token[1] == "{":
                # Unnamed aggregate member: skip its body, then a
                # trailing name and ';'.
                token = self.token()
                token = self.parseTypeBlock(token)
                if token is not None and token[0] == "name":
                    token = self.token()
                if token is not None and token[0] == "sep" and token[1] == ";":
                    token = self.token()
                else:
                    self.error("parseStruct: expecting ;", token)
            else:
                self.error("parseStruct: name", token)
                token = self.token()
            # Restore the enclosing declaration's type.
            self.type = base_type
    self.struct_fields = fields
    # self.debug("end parseStruct", token)
    # print(fields)
    return token
|
2005-12-02 01:34:21 +08:00
|
|
|
|
2018-03-20 14:49:00 +08:00
|
|
|
#
|
|
|
|
# Parse a C union definition till the balancing }
|
|
|
|
#
|
2011-06-20 11:25:34 +08:00
|
|
|
def parseUnion(self, token):
    """Parse a C union definition up to the balancing '}'.

    Mirrors parseStruct but stores (type, name, comment) member
    tuples into self.union_fields instead.  Returns the first
    unconsumed token.
    """
    fields = []
    # self.debug("start parseUnion", token)
    while token is not None:
        if token[0] == "sep" and token[1] == "{":
            # Anonymous inner block: skip it wholesale.
            token = self.token()
            token = self.parseTypeBlock(token)
        elif token[0] == "sep" and token[1] == "}":
            # End of the union body.
            self.union_fields = fields
            # self.debug("end parseUnion", token)
            # print(fields)
            token = self.token()
            return token
        else:
            # A member declaration: save the outer type, parse the
            # member's type, then expect 'name ;'.
            base_type = self.type
            # self.debug("before parseType", token)
            token = self.parseType(token)
            # self.debug("after parseType", token)
            if token is not None and token[0] == "name":
                fname = token[1]
                token = self.token()
                if token[0] == "sep" and token[1] == ";":
                    self.comment = None
                    token = self.token()
                    self.cleanupComment()
                    fields.append((self.type, fname, self.comment))
                    self.comment = None
                else:
                    self.error("parseUnion: expecting ;", token)
            elif token is not None and token[0] == "sep" and token[1] == "{":
                # Unnamed aggregate member: skip its body, then a
                # trailing name and ';'.
                token = self.token()
                token = self.parseTypeBlock(token)
                if token is not None and token[0] == "name":
                    token = self.token()
                if token is not None and token[0] == "sep" and token[1] == ";":
                    token = self.token()
                else:
                    self.error("parseUnion: expecting ;", token)
            else:
                self.error("parseUnion: name", token)
                token = self.token()
            # Restore the enclosing declaration's type.
            self.type = base_type
    self.union_fields = fields
    # self.debug("end parseUnion", token)
    # print(fields)
    return token
|
|
|
|
|
2018-03-20 14:49:00 +08:00
|
|
|
#
|
|
|
|
# Parse a C enum block, parse till the balancing }
|
|
|
|
#
|
2005-12-02 01:34:21 +08:00
|
|
|
def parseEnumBlock(self, token):
    """Parse the body of a C enum, collecting its members.

    Fills self.enums with (name, value, comment) tuples.  Values are
    kept as strings: an explicit '= expr' is accumulated verbatim
    (normalizing literals like '1U' to '1'), otherwise the previous
    value plus one is computed.  Returns the token following the
    closing '}' or None at end of input.
    """
    self.enums = []
    name = None
    comment = ""
    value = "-1"  # so an unvalued first member evaluates to "0"
    # If a comment is already pending when the block opens, this
    # header documents members *before* their value; remember that so
    # comments get attached to the right member below.
    commentsBeforeVal = self.comment is not None
    while token is not None:
        if token[0] == "sep" and token[1] == "{":
            # Nested block: skip it.
            token = self.token()
            token = self.parseTypeBlock(token)
        elif token[0] == "sep" and token[1] == "}":
            # End of enum: flush the last pending member, if any.
            if name is not None:
                self.cleanupComment()
                if self.comment is not None:
                    comment = self.comment
                    self.comment = None
                self.enums.append((name, value, comment))
            token = self.token()
            return token
        elif token[0] == "name":
            self.cleanupComment()
            # A new member name: first flush the previous one.
            if name is not None:
                if self.comment is not None:
                    comment = self.comment.strip()
                    self.comment = None
                self.enums.append((name, value, comment))
            name = token[1]
            comment = ""
            token = self.token()
            if token[0] == "op" and token[1][0] == "=":
                # Explicit value: gather everything up to ',' or '}'.
                value = ""
                if len(token[1]) > 1:
                    # The lexer may have fused '=' with the first
                    # value character.
                    value = token[1][1:]
                token = self.token()
                while token[0] != "sep" or (token[1] != ',' and
                                            token[1] != '}'):
                    # We might be dealing with '1U << 12' here
                    value = value + re.sub(r"^(\d+)U$", "\\1", token[1])
                    token = self.token()
            else:
                # No '=': value is previous value + 1 when computable.
                try:
                    value = "%d" % (int(value) + 1)
                except Exception:
                    self.warning("Failed to compute value of enum %s" % name)
                    value = ""
            if token[0] == "sep" and token[1] == ",":
                if commentsBeforeVal:
                    # Comment style places the doc before the member,
                    # so the pending comment belongs to this one:
                    # record it immediately.
                    self.cleanupComment()
                    self.enums.append((name, value, self.comment))
                    name = comment = self.comment = None
                token = self.token()
        else:
            token = self.token()
    return token
|
2005-12-02 01:34:21 +08:00
|
|
|
|
2012-05-15 18:59:00 +08:00
|
|
|
def parseVirEnumDecl(self, token):
    """Consume a VIR_ENUM_DECL(name) macro invocation.

    Expects the macro's single name argument as the current token,
    then the closing ')' and an optional trailing ';'.  Problems are
    reported through self.error().  Returns the next unconsumed token.
    """
    tok = token
    if tok[0] != "name":
        self.error("parsing VIR_ENUM_DECL: expecting name", tok)

    tok = self.token()

    # The closing parenthesis is validated in two separate steps so
    # each kind of mismatch produces its own diagnostic.
    if tok[0] != "sep":
        self.error("parsing VIR_ENUM_DECL: expecting ')'", tok)

    if tok[1] != ')':
        self.error("parsing VIR_ENUM_DECL: expecting ')'", tok)

    # Swallow an optional ';' after the macro.
    tok = self.token()
    if tok[0] == "sep" and tok[1] == ';':
        tok = self.token()

    return tok
|
|
|
|
|
|
|
|
def parseVirEnumImpl(self, token):
    """Consume a VIR_ENUM_IMPL(type, sentinel, strings...) invocation.

    Validates the macro's shape: a type name, a sentinel name, then a
    comma-separated list of string literals, each optionally wrapped
    in N_(...) for gettext and optionally followed by a comment.
    Problems are reported through self.error().  Returns the token
    following the macro (after an optional trailing ';').
    """
    # First the type name
    if token[0] != "name":
        self.error("parsing VIR_ENUM_IMPL: expecting name", token)

    token = self.token()

    if token[0] != "sep":
        self.error("parsing VIR_ENUM_IMPL: expecting ','", token)

    if token[1] != ',':
        self.error("parsing VIR_ENUM_IMPL: expecting ','", token)
    token = self.token()

    # Now the sentinel name
    if token[0] != "name":
        self.error("parsing VIR_ENUM_IMPL: expecting name", token)

    token = self.token()

    if token[0] != "sep":
        self.error("parsing VIR_ENUM_IMPL: expecting ','", token)

    if token[1] != ',':
        self.error("parsing VIR_ENUM_IMPL: expecting ','", token)

    token = self.token()

    # Now a list of strings (optional comments)
    while token is not None:
        isGettext = False
        # First a string, optionally with N_(...)
        if token[0] == 'name':
            if token[1] != 'N_':
                self.error("parsing VIR_ENUM_IMPL: expecting 'N_'", token)
            token = self.token()
            if token[0] != "sep" or token[1] != '(':
                self.error("parsing VIR_ENUM_IMPL: expecting '('", token)
            token = self.token()
            isGettext = True

            if token[0] != "string":
                self.error("parsing VIR_ENUM_IMPL: expecting a string", token)
            token = self.token()
        elif token[0] == "string":
            token = self.token()
        else:
            self.error("parsing VIR_ENUM_IMPL: expecting a string", token)

        # Then a separator
        if token[0] == "sep":
            if isGettext and token[1] == ')':
                # Close of the N_( ... ) wrapper.
                token = self.token()

            if token[1] == ',':
                token = self.token()

            if token[1] == ')':
                # Close of the macro itself: we are done with the list.
                token = self.token()
                break

        # Then an optional comment
        if token[0] == "comment":
            token = self.token()

    # Swallow an optional ';' after the macro.
    if token[0] == "sep" and token[1] == ';':
        token = self.token()

    return token
|
|
|
|
|
2014-02-28 20:16:17 +08:00
|
|
|
def parseVirLogInit(self, token):
    """Consume a VIR_LOG_INIT("...") macro invocation.

    Expects the string argument as the current token, then the
    closing ')' and an optional trailing ';'.  Problems are reported
    through self.error().  Returns the next unconsumed token.
    """
    tok = token
    if tok[0] != "string":
        self.error("parsing VIR_LOG_INIT: expecting string", tok)

    tok = self.token()

    # The closing parenthesis is validated in two separate steps so
    # each kind of mismatch produces its own diagnostic.
    if tok[0] != "sep":
        self.error("parsing VIR_LOG_INIT: expecting ')'", tok)

    if tok[1] != ')':
        self.error("parsing VIR_LOG_INIT: expecting ')'", tok)

    # Swallow an optional ';' after the macro.
    tok = self.token()
    if tok[0] == "sep" and tok[1] == ';':
        tok = self.token()

    return tok
|
|
|
|
|
2018-03-20 14:49:00 +08:00
|
|
|
#
|
|
|
|
# Parse a C definition block, used for structs or unions it parse till
|
|
|
|
# the balancing }
|
|
|
|
#
|
2005-12-02 01:34:21 +08:00
|
|
|
def parseTypeBlock(self, token):
    """Skip a brace-delimited definition block (struct/union body).

    Consumes tokens up to and including the balancing '}', recursing
    for nested blocks, and returns the first token after it (or None
    at end of input).
    """
    tok = token
    while tok is not None:
        is_sep = tok[0] == "sep"
        if is_sep and tok[1] == "{":
            # Nested block: recurse past its matching '}'.
            tok = self.parseTypeBlock(self.token())
        elif is_sep and tok[1] == "}":
            # Matching close brace: hand back the following token.
            return self.token()
        else:
            tok = self.token()
    return tok
|
2005-12-02 01:34:21 +08:00
|
|
|
|
2018-03-20 14:49:00 +08:00
|
|
|
#
|
|
|
|
# Parse a type: the fact that the type name can either occur after
|
|
|
|
# the definition or within the definition makes it a little harder
|
|
|
|
# if inside, the name token is pushed back before returning
|
|
|
|
#
|
2005-12-02 01:34:21 +08:00
|
|
|
def parseType(self, token):
    """Parse a C type, accumulating its textual form in self.type.

    Handles qualifier chains (const/unsigned/signed), long/long long
    lookahead, struct/union/enum definitions (delegating to
    parseStruct/parseUnion/parseEnumBlock), the VIR_ENUM_DECL,
    VIR_ENUM_IMPL, VIR_LOG_INIT and G_STATIC_ASSERT macros, function
    pointer types (filling self.signature), arrays, and bitfields.
    Because the declared name can occur inside the type syntax, a
    name token found inside is pushed back onto the lexer before
    returning so the caller sees it as the current token.
    """
    self.type = ""
    self.struct_fields = []
    self.union_fields = []
    self.signature = None
    if token is None:
        return token

    # Leading qualifiers, any number of them.
    while (token[0] == "name" and
           token[1] in ["const", "unsigned", "signed"]):
        if self.type == "":
            self.type = token[1]
        else:
            self.type = self.type + " " + token[1]
        token = self.token()

    if token[0] == "name" and token[1] == "long":
        if self.type == "":
            self.type = token[1]
        else:
            self.type = self.type + " " + token[1]

        # some read ahead for long long
        oldtmp = token
        token = self.token()
        if token[0] == "name" and token[1] == "long":
            self.type = self.type + " " + token[1]
        else:
            self.push(token)
            token = oldtmp

        # and for an optional trailing 'int' (e.g. 'long int')
        oldtmp = token
        token = self.token()
        if token[0] == "name" and token[1] == "int":
            self.type = self.type + " " + token[1]
        else:
            self.push(token)
            token = oldtmp

    elif token[0] == "name" and token[1] == "short":
        if self.type == "":
            self.type = token[1]
        else:
            self.type = self.type + " " + token[1]

    elif token[0] == "name" and token[1] == "struct":
        if self.type == "":
            self.type = token[1]
        else:
            self.type = self.type + " " + token[1]
        token = self.token()
        nametok = None
        if token[0] == "name":
            # Tag name; may be the struct tag or the declared name.
            nametok = token
            token = self.token()
        if token is not None and token[0] == "sep" and token[1] == "{":
            # Inline struct definition.
            token = self.token()
            token = self.parseStruct(token)
        elif token is not None and token[0] == "op" and token[1] == "*":
            # Pointer(s) to a named struct.
            self.type = self.type + " " + nametok[1] + " *"
            token = self.token()
            while token is not None and token[0] == "op" and token[1] == "*":
                self.type = self.type + " *"
                token = self.token()
            if token[0] == "name":
                nametok = token
                token = self.token()
            else:
                self.error("struct : expecting name", token)
                return token
        elif token is not None and token[0] == "name" and nametok is not None:
            # 'struct tag name': tag belongs to the type.
            self.type = self.type + " " + nametok[1]
            return token

        if nametok is not None:
            # The name was consumed as part of scanning the type:
            # push the current token back and surface the name.
            self.lexer.push(token)
            token = nametok
        return token

    elif token[0] == "name" and token[1] == "union":
        if self.type == "":
            self.type = token[1]
        else:
            self.type = self.type + " " + token[1]
        token = self.token()
        nametok = None
        if token[0] == "name":
            nametok = token
            token = self.token()
        if token is not None and token[0] == "sep" and token[1] == "{":
            # Inline union definition.
            token = self.token()
            token = self.parseUnion(token)
        elif token is not None and token[0] == "name" and nametok is not None:
            # 'union tag name': tag belongs to the type.
            self.type = self.type + " " + nametok[1]
            return token

        if nametok is not None:
            self.lexer.push(token)
            token = nametok
        return token

    elif token[0] == "name" and token[1] == "enum":
        if self.type == "":
            self.type = token[1]
        else:
            self.type = self.type + " " + token[1]
        self.enums = []
        token = self.token()
        if token is not None and token[0] == "sep" and token[1] == "{":
            # drop comments before the enum block
            self.comment = None
            token = self.token()
            token = self.parseEnumBlock(token)
        else:
            self.error("parsing enum: expecting '{'", token)
        enum_type = None
        if token is not None and token[0] != "name":
            self.lexer.push(token)
            token = ("name", "enum")
        else:
            enum_type = token[1]
        # Register every parsed enum member in the index.
        for enum in self.enums:
            self.index_add(enum[0], self.filename,
                           not self.is_header, "enum",
                           (enum[1], enum[2], enum_type))
        return token

    elif token[0] == "name" and token[1] == "VIR_ENUM_DECL":
        token = self.token()
        if token is not None and token[0] == "sep" and token[1] == "(":
            token = self.token()
            token = self.parseVirEnumDecl(token)
        else:
            self.error("parsing VIR_ENUM_DECL: expecting '('", token)
        if token is not None:
            # Synthesize a placeholder name token for the caller.
            self.lexer.push(token)
            token = ("name", "virenumdecl")
        return token

    elif token[0] == "name" and token[1] == "VIR_ENUM_IMPL":
        token = self.token()
        if token is not None and token[0] == "sep" and token[1] == "(":
            token = self.token()
            token = self.parseVirEnumImpl(token)
        else:
            self.error("parsing VIR_ENUM_IMPL: expecting '('", token)
        if token is not None:
            # Synthesize a placeholder name token for the caller.
            self.lexer.push(token)
            token = ("name", "virenumimpl")
        return token

    elif token[0] == "name" and token[1] == "VIR_LOG_INIT":
        token = self.token()
        if token is not None and token[0] == "sep" and token[1] == "(":
            token = self.token()
            token = self.parseVirLogInit(token)
        else:
            self.error("parsing VIR_LOG_INIT: expecting '('", token)
        if token is not None:
            # Synthesize a placeholder name token for the caller.
            self.lexer.push(token)
            token = ("name", "virloginit")
        return token

    elif token[0] == "name" and token[1] == "G_STATIC_ASSERT":
        # skip whole line
        while token is not None and not (token[0] == "sep" and
                                         token[1] == ";"):
            token = self.token()
        return self.token()

    elif token[0] == "name":
        # A plain (typedef'd or builtin) type name.
        if self.type == "":
            self.type = token[1]
        else:
            self.type = self.type + " " + token[1]
    else:
        self.error("parsing type %s: expecting a name" % (self.type),
                   token)
        return token
    token = self.token()
    # Trailing '*' pointers and 'const' qualifiers.
    while token is not None and (token[0] == "op" or
                                 token[0] == "name" and
                                 token[1] == "const"):
        self.type = self.type + " " + token[1]
        token = self.token()

    #
    # if there is a parenthesis here, this means a function type
    #
    if token is not None and token[0] == "sep" and token[1] == '(':
        self.type = self.type + token[1]
        token = self.token()
        while token is not None and token[0] == "op" and token[1] == '*':
            self.type = self.type + token[1]
            token = self.token()
        if token is None or token[0] != "name":
            self.error("parsing function type, name expected", token)
            return token
        self.type = self.type + token[1]
        nametok = token
        token = self.token()
        if token is not None and token[0] == "sep" and token[1] == ')':
            self.type = self.type + token[1]
            token = self.token()
            if token is not None and token[0] == "sep" and token[1] == '(':
                token = self.token()
                # parseSignature clobbers self.type; preserve it.
                type = self.type
                token = self.parseSignature(token)
                self.type = type
            else:
                self.error("parsing function type, '(' expected", token)
                return token
        else:
            self.error("parsing function type, ')' expected", token)
            return token
        # Surface the function-pointer's name as the current token.
        self.lexer.push(token)
        token = nametok
        return token

    #
    # do some lookahead for arrays
    #
    if token is not None and token[0] == "name":
        nametok = token
        token = self.token()
        if token is not None and token[0] == "sep" and token[1] == '[':
            self.type = self.type + " " + nametok[1]
            while token is not None and token[0] == "sep" and token[1] == '[':
                self.type = self.type + token[1]
                token = self.token()
                # Accumulate the dimension expression verbatim.
                while (token is not None and token[0] != 'sep' and
                       token[1] != ']' and token[1] != ';'):
                    self.type = self.type + token[1]
                    token = self.token()
                if token is not None and token[0] == 'sep' and token[1] == ']':
                    self.type = self.type + token[1]
                    token = self.token()
                else:
                    self.error("parsing array type, ']' expected", token)
                    return token
        elif token is not None and token[0] == "sep" and token[1] == ':':
            # remove :12 in case it's a limited int size
            token = self.token()
            token = self.token()
        # Push back whatever followed and surface the declared name.
        self.lexer.push(token)
        token = nametok

    return token
|
2005-12-02 01:34:21 +08:00
|
|
|
|
2018-03-20 14:49:00 +08:00
|
|
|
#
|
|
|
|
# Parse a signature: '(' has been parsed and we scan the type definition
|
|
|
|
# up to the ')' included
|
2005-12-02 01:34:21 +08:00
|
|
|
def parseSignature(self, token):
    """Parse a function signature; '(' was already consumed.

    Scans parameter declarations up to and including the closing ')'
    and stores them in self.signature as (type, name, None) tuples —
    name is None when only a type was given, or "..." for varargs.
    Returns the first token after the ')'.
    """
    signature = []
    if token is not None and token[0] == "sep" and token[1] == ')':
        # Empty parameter list.
        self.signature = []
        token = self.token()
        return token
    while token is not None:
        token = self.parseType(token)
        if token is not None and token[0] == "name":
            # 'type name' parameter.
            signature.append((self.type, token[1], None))
            token = self.token()
        elif token is not None and token[0] == "sep" and token[1] == ',':
            token = self.token()
            continue
        elif token is not None and token[0] == "sep" and token[1] == ')':
            # only the type was provided
            if self.type == "...":
                signature.append((self.type, "...", None))
            else:
                signature.append((self.type, None, None))
        if token is not None and token[0] == "sep":
            if token[1] == ',':
                token = self.token()
                continue
            elif token[1] == ')':
                token = self.token()
                break
    self.signature = signature
    return token
|
2005-12-02 01:34:21 +08:00
|
|
|
|
2011-05-30 20:36:41 +08:00
|
|
|
# this dict contains the functions that are allowed to use [unsigned]
# long for legacy reasons in their signature and return type. this list is
# fixed. new procedures and public APIs have to use [unsigned] long long
#
# Each value is (may_return_long, allowed_long_parameter_names).
# NOTE: single-element entries need the trailing comma to be real
# tuples — a bare ("flags") is a *string*, and `x in "flags"` does
# substring matching, which would wrongly accept names like "lag".
long_legacy_functions = {
    "virGetVersion": (False, ("libVer", "typeVer")),
    "virConnectGetLibVersion": (False, ("libVer",)),
    "virConnectGetVersion": (False, ("hvVer",)),
    "virDomainGetMaxMemory": (True, ()),
    "virDomainMigrate": (False, ("flags", "bandwidth")),
    "virDomainMigrate2": (False, ("flags", "bandwidth")),
    "virDomainMigrateBegin3": (False, ("flags", "bandwidth")),
    "virDomainMigrateConfirm3": (False, ("flags", "bandwidth")),
    "virDomainMigrateDirect": (False, ("flags", "bandwidth")),
    "virDomainMigrateFinish": (False, ("flags",)),
    "virDomainMigrateFinish2": (False, ("flags",)),
    "virDomainMigrateFinish3": (False, ("flags",)),
    "virDomainMigratePeer2Peer": (False, ("flags", "bandwidth")),
    "virDomainMigratePerform": (False, ("flags", "bandwidth")),
    "virDomainMigratePerform3": (False, ("flags", "bandwidth")),
    "virDomainMigratePrepare": (False, ("flags", "bandwidth")),
    "virDomainMigratePrepare2": (False, ("flags", "bandwidth")),
    "virDomainMigratePrepare3": (False, ("flags", "bandwidth")),
    "virDomainMigratePrepareTunnel": (False, ("flags", "bandwidth")),
    "virDomainMigratePrepareTunnel3": (False, ("flags", "bandwidth")),
    "virDomainMigrateToURI": (False, ("flags", "bandwidth")),
    "virDomainMigrateToURI2": (False, ("flags", "bandwidth")),
    "virDomainMigrateVersion1": (False, ("flags", "bandwidth")),
    "virDomainMigrateVersion2": (False, ("flags", "bandwidth")),
    "virDomainMigrateVersion3": (False, ("flags", "bandwidth")),
    "virDomainMigrateSetMaxSpeed": (False, ("bandwidth",)),
    "virDomainSetMaxMemory": (False, ("memory",)),
    "virDomainSetMemory": (False, ("memory",)),
    "virDomainSetMemoryFlags": (False, ("memory",)),
    "virDomainBlockCommit": (False, ("bandwidth",)),
    "virDomainBlockJobSetSpeed": (False, ("bandwidth",)),
    "virDomainBlockPull": (False, ("bandwidth",)),
    "virDomainBlockRebase": (False, ("bandwidth",)),
    "virDomainMigrateGetMaxSpeed": (False, ("bandwidth",)),
}
|
2011-05-30 20:36:41 +08:00
|
|
|
|
|
|
|
def checkLongLegacyFunction(self, name, return_type, signature):
    """Reject plain [unsigned] long in an API unless grandfathered.

    New public functions must use [unsigned] long long; only entries
    in CParser.long_legacy_functions may keep plain long, either as
    the return type (when element 0 is True) or for the parameter
    names listed in element 1.  Violations are reported through
    self.error().
    """
    if "long" in return_type and "long long" not in return_type:
        # Allowed only when the function is whitelisted with a True
        # return flag; an unknown name (KeyError) is a violation.
        try:
            ok = CParser.long_legacy_functions[name][0]
        except KeyError:
            ok = False
        if not ok:
            self.error(("function '%s' is not allowed to return long, "
                        "use long long instead") % name)

    for param in signature:
        if "long" in param[0] and "long long" not in param[0]:
            # Allowed only when this parameter name is whitelisted.
            # TypeError covers legacy whitelist entries that are plain
            # strings and an unnamed parameter (param[1] is None).
            try:
                ok = param[1] in CParser.long_legacy_functions[name][1]
            except (KeyError, TypeError):
                ok = False
            if not ok:
                self.error(("function '%s' is not allowed to take long "
                            "parameter '%s', use long long instead")
                           % (name, param[1]))
|
|
|
|
|
|
|
|
# this dict contains the structs that are allowed to use [unsigned]
# long for legacy reasons. this list is fixed. new structs have to use
# [unsigned] long long
#
# NOTE: single-element entries need the trailing comma to be real
# tuples — a bare ("memory") is a *string*, and `x in "memory"` does
# substring matching, which would wrongly accept names like "emo".
long_legacy_struct_fields = {
    "_virDomainInfo": ("maxMem", "memory"),
    "_virNodeInfo": ("memory",),
    "_virDomainBlockJobInfo": ("bandwidth",),
}
|
2011-05-30 20:36:41 +08:00
|
|
|
|
|
|
|
def checkLongLegacyStruct(self, name, fields):
    """Reject plain [unsigned] long struct fields unless grandfathered.

    New public structs must use [unsigned] long long; only the field
    names listed in CParser.long_legacy_struct_fields may keep plain
    long.  Violations are reported through self.error().
    """
    for field in fields:
        if "long" in field[0] and "long long" not in field[0]:
            # Allowed only when this field name is whitelisted for the
            # struct.  KeyError covers unknown structs; TypeError
            # covers any non-container whitelist entry.
            try:
                ok = field[1] in CParser.long_legacy_struct_fields[name]
            except (KeyError, TypeError):
                ok = False
            if not ok:
                self.error(("struct '%s' is not allowed to contain long "
                            "field '%s', use long long instead")
                           % (name, field[1]))
|
|
|
|
|
2018-03-20 14:49:00 +08:00
|
|
|
#
# Parse a global definition, be it a type, variable or function
# the extern "C" blocks are a bit nasty and require it to recurse.
#
def parseGlobal(self, token):
    """Parse one top-level C construct starting at 'token'.

    Dispatches on the leading keyword: recurses into extern "C" {...}
    blocks, hands typedefs to parseTypedef(), and otherwise parses a
    type followed by one or more variable or function declarators,
    registering each symbol via index_add().  Returns the next token
    to process, or None at end of input.
    """
    static = 0
    if token[1] == 'extern':
        token = self.token()
        if token is None:
            return token
        if token[0] == 'string':
            if token[1] == 'C':
                token = self.token()
                if token is None:
                    return token
                if token[0] == 'sep' and token[1] == "{":
                    token = self.token()
                    # print('Entering extern "C line ', self.lineno())
                    # Recurse on every construct inside the extern "C"
                    # block until the matching closing brace.
                    while token is not None and (token[0] != 'sep' or
                                                 token[1] != "}"):
                        if token[0] == 'name':
                            token = self.parseGlobal(token)
                        else:
                            self.error(("token %s %s unexpected at the "
                                        "top level") %
                                       (token[0], token[1]))
                            token = self.parseGlobal(token)
                    # print('Exiting extern "C" line', self.lineno())
                    token = self.token()
                    return token
            else:
                return token
    elif token[1] == 'static':
        static = 1
        token = self.token()
        if token is None or token[0] != 'name':
            return token

    variable_comment = None
    if token[1] == 'typedef':
        token = self.token()
        return self.parseTypedef(token)
    else:
        # Store block of comment that might be from variable as
        # the code uses self.comment a lot and it would lose it.
        variable_comment = self.comment
        token = self.parseType(token)
        type_orig = self.type
        if token is None or token[0] != "name":
            return token
        type = type_orig
        self.name = token[1]
        token = self.token()
    # Walk the declarator list: array suffixes, initializers, and the
    # ';' / '(' / ',' separators that distinguish variables from
    # functions.
    while token is not None and (token[0] == "sep" or token[0] == "op"):
        if token[0] == "sep":
            if token[1] == "[":
                # Array declarator: fold brackets/size into the type
                # string up to the terminating ';'.
                type = type + token[1]
                token = self.token()
                while token is not None and (token[0] != "sep" or
                                             token[1] != ";"):
                    type = type + token[1]
                    token = self.token()

        if token is not None and token[0] == "op" and token[1] == "=":
            #
            # Skip the initialization of the variable
            #
            token = self.token()
            if token[0] == 'sep' and token[1] == '{':
                token = self.token()
                token = self.parseBlock(token)
            else:
                self.comment = None
                while token is not None and (token[0] != "sep" or
                                             token[1] not in ',;'):
                    token = self.token()
            self.comment = None
            if token is None or token[0] != "sep" or (token[1] != ';' and
                                                      token[1] != ','):
                self.error("missing ';' or ',' after value")

        if token is not None and token[0] == "sep":
            if token[1] == ";":
                # End of a struct or plain variable declaration.
                self.comment = None
                token = self.token()
                if type == "struct":
                    self.checkLongLegacyStruct(self.name, self.struct_fields)
                    self.index_add(self.name, self.filename,
                                   not self.is_header, "struct",
                                   self.struct_fields)
                else:
                    # Just to use the cleanupComment function.
                    variable_comment = self.cleanup_code_comment(variable_comment, self.name)
                    info = (type, variable_comment)
                    self.index_add(self.name, self.filename,
                                   not self.is_header, "variable", info)
                break
            elif token[1] == "(":
                # '(' after the name: a function declaration or
                # definition; parse its parameter signature.
                token = self.token()
                token = self.parseSignature(token)
                if token is None:
                    return None
                if token[0] == "sep" and token[1] == ";":
                    # Prototype only (header declaration).
                    self.checkLongLegacyFunction(self.name, type, self.signature)
                    d = self.mergeFunctionComment(self.name,
                                                  ((type, None),
                                                   self.signature), 1)
                    self.index_add(self.name, self.filename, static,
                                   "function", d)
                    token = self.token()
                elif token[0] == "sep" and token[1] == "{":
                    # Function definition: index it, then skip the body.
                    self.checkLongLegacyFunction(self.name, type, self.signature)
                    d = self.mergeFunctionComment(self.name,
                                                  ((type, None),
                                                   self.signature), static)
                    self.index_add(self.name, self.filename, static,
                                   "function", d)
                    token = self.token()
                    token = self.parseBlock(token)
            elif token[1] == ',':
                # Comma-separated declarator list: register this variable
                # and continue with the next name of the same base type.
                self.comment = None
                self.index_add(self.name, self.filename, static,
                               "variable", type)
                type = type_orig
                token = self.token()
                while token is not None and token[0] == "sep":
                    type = type + token[1]
                    token = self.token()
                if token is not None and token[0] == "name":
                    self.name = token[1]
                    token = self.token()
            else:
                break

    return token
|
2005-12-02 01:34:21 +08:00
|
|
|
|
|
|
|
def parse(self):
    """Parse the whole file, indexing every top-level construct.

    Returns the symbol index, or None early when an unexpected token
    is met at the top level.
    """
    if not quiet:
        print("Parsing %s" % (self.filename))
    token = self.token()
    while token is not None:
        if token[0] != 'name':
            # Report the stray token, attempt one resynchronization,
            # then give up on this file.
            self.error("token %s %s unexpected at the top level" % (
                token[0], token[1]))
            self.parseGlobal(token)
            return
        token = self.parseGlobal(token)
    self.parseTopComment(self.top_comment)
    return self.index
|
2008-02-06 03:27:37 +08:00
|
|
|
|
2005-12-02 01:34:21 +08:00
|
|
|
|
|
|
|
class docBuilder:
|
|
|
|
"""A documentation builder"""
|
2021-09-23 18:40:01 +08:00
|
|
|
def __init__(self, name, syms, path='.', directories=None, includes=None):
    """Build a documentation builder for the library 'name'.

    name        -- library name; selects the header list to include
    syms        -- path to the linker version script (.syms) file
    path        -- output path (default '.')
    directories -- source directories to scan (default ['.'])
    includes    -- extra file names to add to the scan list
    """
    # Bug fix: use None sentinels instead of mutable default
    # arguments, so the default lists cannot be shared (and possibly
    # mutated) across instances.
    if directories is None:
        directories = ['.']
    if includes is None:
        includes = []
    self.name = name
    self.syms = syms
    self.path = path
    self.directories = directories
    # Select the per-library list of files that belong to the API.
    if name == "libvirt":
        self.includes = includes + list(included_files.keys())
    elif name == "libvirt-qemu":
        self.includes = includes + list(qemu_included_files.keys())
    elif name == "libvirt-lxc":
        self.includes = includes + list(lxc_included_files.keys())
    elif name == "libvirt-admin":
        self.includes = includes + list(admin_included_files.keys())
    # NOTE(review): an unknown name leaves self.includes unset --
    # presumably callers only ever pass the four names above; verify.
    self.modules = {}
    self.headers = {}
    self.versions = {}
    self.idx = index()
    self.xref = {}
    self.index = {}
    self.basename = name
    self.errors = 0
|
2005-12-02 01:34:21 +08:00
|
|
|
|
2012-07-27 21:03:03 +08:00
|
|
|
def warning(self, msg):
    """Print a warning message and bump the module-global counter."""
    global warnings
    warnings += 1
    print(msg)
|
2012-07-27 21:03:03 +08:00
|
|
|
|
2013-01-29 22:35:28 +08:00
|
|
|
def error(self, msg):
    """Report a fatal problem on stderr and count it on the builder."""
    self.errors = self.errors + 1
    print("Error:", msg, file=sys.stderr)
|
2013-01-29 22:35:28 +08:00
|
|
|
|
2005-12-02 01:34:21 +08:00
|
|
|
def indexString(self, id, str):
    """Record cross-reference data: map each significant word found in
    'str' to the symbol 'id' in self.xref.

    Words shorter than 3 characters, words not starting with an ASCII
    letter, and the stop words 'and'/'the' are skipped.
    """
    if str is None:
        return
    # Blank out punctuation/markup characters in a single regex pass
    # instead of a long chain of str.replace() calls.
    str = re.sub(r"['\"/*\[\]()<>&#,.;]", ' ', str)
    for token in str.split():
        # Must start with an ASCII letter ...
        if not re.match(r"[a-zA-Z]", token[0]):
            continue
        # ... be at least 3 characters long ...
        if len(token) < 3:
            continue
        # ... and not be a common stop word.
        # TODO: generalize this a bit
        lower = token.lower()
        if lower == 'and' or lower == 'the':
            continue
        self.xref.setdefault(token, []).append(id)
|
2005-12-02 01:34:21 +08:00
|
|
|
|
|
|
|
def analyze(self):
    """Print a project summary (unless quiet) and analyze the index."""
    if not quiet:
        header_count = len(self.headers.keys())
        module_count = len(self.modules.keys())
        print("Project %s : %d headers, %d modules" % (self.name, header_count, module_count))
    self.idx.analyze()
|
2005-12-02 01:34:21 +08:00
|
|
|
|
|
|
|
def scanHeaders(self):
    """Parse every collected header and merge its symbols into the
    global index."""
    for header in list(self.headers):
        idx = CParser(header).parse()
        self.headers[header] = idx
        self.idx.merge(idx)
|
2005-12-02 01:34:21 +08:00
|
|
|
|
|
|
|
def scanModules(self):
    """Parse every collected C module and merge its public symbols
    into the global index."""
    for module in list(self.modules):
        idx = CParser(module).parse()
        self.modules[module] = idx
        self.idx.merge_public(idx)
|
2005-12-02 01:34:21 +08:00
|
|
|
|
2021-09-23 18:40:01 +08:00
|
|
|
def scanVersions(self):
    """Parse the linker version script named by self.syms.

    Fills self.versions, mapping each exported function name to the
    version of the export block it appears in.  Raises Exception on a
    malformed file (unexpected line or mismatched block closing).
    """
    # Blocks look like: "LIBVIRT_1.2.3 { global: sym; } LIBVIRT_1.2.2;"
    prefix = self.name.upper().replace("-", "_") + "_"

    version = None       # version of the block currently open
    prevversion = None   # version of the last block closed
    with open(self.syms, "r") as syms:
        for line in syms:
            line = line.strip()
            if line.startswith("#"):
                continue
            if line == "":
                continue

            if line.startswith(prefix) and line.endswith(" {"):
                # Start of a new version block.
                version = line[len(prefix):-2]
            elif line == "global:":
                continue
            elif line == "local:":
                continue
            elif line.startswith("}"):
                # The closing line must name the previous version
                # (version-script inheritance), or be a bare "};" for
                # the very first block.
                if prevversion is None:
                    if line != "};":
                        raise Exception("Unexpected closing version")
                else:
                    expected = "} %s%s;" % (prefix, prevversion)
                    if line != expected:
                        # Bug fix: report the closing line actually
                        # expected; the old message interpolated the
                        # wrong variable ('version' instead of
                        # 'prevversion') and had unbalanced quotes.
                        raise Exception("Unexpected end of version '%s': expected '%s'" % (line, expected))

                prevversion = version
                version = None
            elif line.endswith(";") and version is not None:
                # An exported symbol inside the current block.
                func = line[:-1]
                self.versions[func] = version
            else:
                raise Exception("Unexpected line in syms file: %s" % line)
|
|
|
|
|
2005-12-02 01:34:21 +08:00
|
|
|
def scan(self):
    """Collect the .c and .h files whose names match self.includes in
    every configured directory, then parse headers, modules and the
    export file."""
    for directory in self.directories:
        # Same filtering for implementation files and headers; only
        # the glob pattern and the destination dict differ.
        for pattern, registry in (("/*.c", self.modules),
                                  ("/*.h", self.headers)):
            for file in glob.glob(directory + pattern):
                wanted = False
                for incl in self.includes:
                    if file.find(incl) != -1:
                        wanted = True
                        break
                if wanted:
                    registry[file] = None
    self.scanHeaders()
    self.scanModules()
    self.scanVersions()
|
2008-02-06 03:27:37 +08:00
|
|
|
|
2022-04-23 03:23:38 +08:00
|
|
|
# Fetch tags from the comment. Only 'Since' supported at the moment.
# For functions, since tags are on Return comments.
# Return the tags and the original comments, but without the tags.
def retrieve_comment_tags(self, name: str, comment: str,
                          return_comment="") -> (str, str, str):
    """Extract the '(Since: X.Y.Z)' tag from 'comment', falling back
    to 'return_comment' (used for functions, where the tag lives on
    the Returns: text).

    Returns (since, comment, return_comment) with the tag stripped
    from whichever text carried it; warns when no tag is found.
    """
    def _extract(text):
        # Return (version, text-without-tag); text is returned
        # unchanged when no tag is present.
        match = re.search(r"\(?Since: (\d+\.\d+\.\d+\.?\d?)\)?", text)
        if not match:
            return "", text
        (start, end) = match.span()
        return match.group(1), (text[:start] + text[end:]).strip()

    since = ""
    if comment is not None:
        (since, comment) = _extract(comment)

    if since == "" and return_comment is not None:
        (since, return_comment) = _extract(return_comment)

    if since == "":
        self.warning("Missing 'Since' tag for: " + name)
    return (since, comment, return_comment)
|
2022-04-23 03:23:38 +08:00
|
|
|
|
2005-12-02 01:34:21 +08:00
|
|
|
def modulename_file(self, file):
    """Map a source path to its module name: the basename with any
    '.h' or '.c' suffix removed."""
    base = os.path.basename(file)
    for suffix in ('.h', '.c'):
        if base[-2:] == suffix:
            return base[:-2]
    return base
|
2005-12-02 01:34:21 +08:00
|
|
|
|
|
|
|
def serialize_enum(self, output, name):
    """Write the <enum> XML element for enum value 'name' to output.

    Emits value/value_hex/value_bitshift attributes when the value can
    be computed, plus optional type, version (from the 'Since' tag)
    and info attributes taken from the parsed comment.
    """
    id = self.idx.enums[name]
    output.write(" <enum name='%s' file='%s'" % (name,
                 self.modulename_file(id.header)))
    if id.info is not None:
        info = id.info
        valhex = ""
        if info[0] is not None and info[0] != '':
            try:
                # NOTE(review): eval() of the value expression taken
                # from our own parsed headers -- trusted input, but not
                # safe for arbitrary sources.
                val = eval(info[0])
                valhex = hex(val)
            except Exception:
                # Not numerically computable: emit the raw expression.
                val = info[0]
            output.write(" value='%s'" % (val))

            if valhex != "":
                output.write(" value_hex='%s'" % (valhex))

            # Expose (1<<n) flag values through value_bitshift.
            m = re.match(r"\(?1<<(\d+)\)?", info[0])
            if m:
                output.write(" value_bitshift='%s'" % (m.group(1)))

        if info[2] is not None and info[2] != '':
            output.write(" type='%s'" % info[2])
        if info[1] is not None and info[1] != '':
            # Search for 'Since' version tag
            (since, comment, _) = self.retrieve_comment_tags(name, info[1])
            if len(since) > 0:
                output.write(" version='%s'" % escape(since))
            if len(comment) > 0:
                output.write(" info='%s'" % escape(comment))
        else:
            self.warning("Missing docstring for enum: " + name)

    output.write("/>\n")
|
|
|
|
|
|
|
|
def serialize_macro(self, output, name):
    """Write the <macro> XML element for macro 'name' to output.

    Attributes cover the parameter list, the string or raw expansion
    value, and the 'Since' version tag; child elements carry the
    macro's description and each documented argument.
    """
    id = self.idx.macros[name]
    output.write(" <macro name='%s' file='%s'" % (name,
                 self.modulename_file(id.header)))
    if id.info is None:
        # No parsed information at all for this macro.
        args = []
        desc = None
        params = None
        strValue = None
        rawValue = None
    else:
        (args, desc, params, strValue, rawValue) = id.info

    if params is not None:
        output.write(" params='%s'" % params)
    if strValue is not None:
        output.write(" string='%s'" % strValue)
    else:
        # NOTE(review): rawValue can be None when id.info was None, in
        # which case escape(None) would fail -- presumably never hit in
        # practice; verify against the parser output.
        output.write(" raw='%s'" % escape(rawValue))

    (since, comment, _) = self.retrieve_comment_tags(name, desc)
    if len(since) > 0:
        output.write(" version='%s'" % escape(since))
    output.write(">\n")

    if comment is not None and comment != "":
        output.write(" <info><![CDATA[%s]]></info>\n" % (comment))
        self.indexString(name, comment)
    for arg in args:
        # NB: rebinds 'name' and 'desc' -- the macro name is no longer
        # available past this point.
        (name, desc) = arg
        if desc is not None and desc != "":
            output.write(" <arg name='%s' info='%s'/>\n" % (
                name, escape(desc)))
            self.indexString(name, desc)
        else:
            output.write(" <arg name='%s'/>\n" % name)
    output.write(" </macro>\n")
|
|
|
|
|
2011-06-20 11:25:34 +08:00
|
|
|
def serialize_union(self, output, field, desc):
    """Emit a union-typed <field> element with one nested <field> per
    union member."""
    output.write(" <field name='%s' type='union' info='%s'>\n" % (field[1], desc))
    output.write(" <union>\n")
    for member in field[3]:
        member_info = '' if member[2] is None else escape(member[2])
        output.write(" <field name='%s' type='%s' info='%s'/>\n" % (member[1], member[0], member_info))

    output.write(" </union>\n")
    output.write(" </field>\n")
|
|
|
|
|
2005-12-02 01:34:21 +08:00
|
|
|
def serialize_typedef(self, output, name):
    """Write the XML for typedef 'name': a <struct> element (with its
    <field> children) for struct typedefs, a plain <typedef> element
    otherwise.  A 'Since' tag, when present, becomes a version
    attribute.
    """
    id = self.idx.typedefs[name]
    (since, comment, _) = self.retrieve_comment_tags(name, id.extra)
    # Omit the version attribute entirely when no 'Since' tag exists.
    version_tag = len(since) > 0 and f" version='{since}'" or ""
    if id.info[0:7] == 'struct ':
        output.write(" <struct name='%s' file='%s' type='%s'%s" % (
            name, self.modulename_file(id.header), id.info, version_tag))
        # From here on 'name' is the underlying struct tag, not the
        # typedef name.
        name = id.info[7:]
        if (name in self.idx.structs and
                isinstance(self.idx.structs[name].info, (list, tuple))):
            output.write(">\n")
            try:
                for field in self.idx.structs[name].info:
                    desc = field[2]
                    self.indexString(name, desc)
                    if desc is None:
                        desc = ''
                    else:
                        desc = escape(desc)
                    if field[0] == "union":
                        self.serialize_union(output, field, desc)
                    else:
                        output.write(" <field name='%s' type='%s' info='%s'/>\n" % (field[1], field[0], desc))
            except Exception:
                self.warning("Failed to serialize struct %s" % name)
            output.write(" </struct>\n")
        else:
            # Opaque struct: no field list is available.
            output.write("/>\n")
    else:
        output.write(" <typedef name='%s' file='%s' type='%s'%s" % (
            name, self.modulename_file(id.header), id.info, version_tag))
        try:
            if comment is not None and comment != "":
                output.write(">\n <info><![CDATA[%s]]></info>\n" % (comment))
                output.write(" </typedef>\n")
            else:
                output.write("/>\n")
        except Exception:
            output.write("/>\n")
|
2005-12-02 01:34:21 +08:00
|
|
|
|
|
|
|
def serialize_variable(self, output, name):
    """Write the <variable> XML element for global variable 'name',
    with a version attribute when the docstring carried a 'Since' tag
    and an <info> child when a description is available."""
    id = self.idx.variables[name]
    (type, comment) = id.info
    (since, comment, _) = self.retrieve_comment_tags(name, comment)
    version_tag = " version='%s'" % since if len(since) > 0 else ""
    output.write(" <variable name='%s' file='%s' type='%s'%s" % (
        name, self.modulename_file(id.header), type, version_tag))
    if len(comment) != 0:
        output.write(">\n <info><![CDATA[%s]]></info>\n" % (comment))
        output.write(" </variable>\n")
    else:
        output.write("/>\n")
|
2008-02-06 03:27:37 +08:00
|
|
|
|
2005-12-02 01:34:21 +08:00
|
|
|
def serialize_function(self, output, name):
    """Write the <function> or <functype> XML element for 'name'.

    Real functions take their version from the .syms file (mandatory);
    function typedefs take it from the docstring's 'Since' tag.  Also
    emits build conditionals, the description, the return value and
    every argument, reporting errors for missing documentation.
    """
    id = self.idx.functions[name]
    if name == debugsym and not quiet:
        print("=>", id)

    (ret, params, desc) = id.info
    return_comment = (ret is not None and ret[1] is not None) and ret[1] or ""
    (since, comment, return_comment) = self.retrieve_comment_tags(name, desc, return_comment)
    # Simple way to avoid setting empty version
    version_tag = len(since) > 0 and f" version='{since}'" or ""

    # NB: this is consumed by a regex in 'getAPIFilenames' in hvsupport.pl
    if id.type == "function":
        # Exported functions must appear in the symbol file.
        if name not in self.versions:
            raise Exception("Missing symbol file entry for '%s'" % name)
        ver = self.versions[name]
        if ver is None:
            raise Exception("Missing version for '%s'" % name)
        output.write(" <function name='%s' file='%s' module='%s' version='%s'>\n" % (
            name, self.modulename_file(id.header),
            self.modulename_file(id.module), self.versions[name]))
    else:
        # Function typedef: version comes from the docstring tag only.
        output.write(" <functype name='%s' file='%s' module='%s'%s>\n" % (
            name, self.modulename_file(id.header),
            self.modulename_file(id.module),
            version_tag))
    #
    # Processing of conditionals modified by Bill 1/1/05
    #
    if id.conditionals is not None:
        apstr = ""
        for cond in id.conditionals:
            if apstr != "":
                apstr = apstr + " && "
            apstr = apstr + cond
        output.write(" <cond>%s</cond>\n" % (apstr))

    try:
        # For functions, we get the since version from .syms files.
        # This is an extra check to see that docstrings are correct
        # and to avoid wrong versions in the .sym files too.
        ver = name in self.versions and self.versions[name] or None
        if len(since) > 0 and ver is not None and since != ver:
            if name in ignored_function_versions:
                allowedver = ignored_function_versions[name]
                if allowedver != since:
                    self.warning(f"Function {name} has allowed version {allowedver} but docstring says {since}")
            else:
                self.warning(f"Function {name} has symversion {ver} but docstring says {since}")

        output.write(" <info><![CDATA[%s]]></info>\n" % (comment))
        self.indexString(name, desc)

        if ret[0] is not None:
            if ret[0] == "void":
                output.write(" <return type='void'/>\n")
            elif (return_comment == '') and name not in ignored_functions:
                # Undocumented non-void return value is an error.
                self.error("Missing documentation for return of function `%s'" % name)
            else:
                output.write(" <return type='%s' info='%s'/>\n" % (
                    ret[0], escape(return_comment)))
                self.indexString(name, ret[1])

        for param in params:
            if param[0] == 'void':
                continue
            if (param[2] is None or param[2] == ''):
                if name in ignored_functions:
                    output.write(" <arg name='%s' type='%s' info=''/>\n" % (param[1], param[0]))
                else:
                    self.error("Missing documentation for arg `%s' of function `%s'" % (param[1], name))
            else:
                output.write(" <arg name='%s' type='%s' info='%s'/>\n" % (param[1], param[0], escape(param[2])))
                self.indexString(name, param[2])
    except Exception:
        print("Exception:", sys.exc_info()[1], file=sys.stderr)
        self.warning("Failed to save function %s info: %s" % (name, repr(id.info)))
    output.write(" </%s>\n" % (id.type))
|
|
|
|
|
|
|
|
def serialize_exports(self, output, file):
|
|
|
|
module = self.modulename_file(file)
|
2011-02-16 23:57:50 +08:00
|
|
|
output.write(" <file name='%s'>\n" % (module))
|
|
|
|
dict = self.headers[file]
|
2013-08-22 17:16:03 +08:00
|
|
|
if dict.info is not None:
|
2018-12-13 19:23:42 +08:00
|
|
|
for data in ('Summary', 'Description'):
|
2011-02-16 23:57:50 +08:00
|
|
|
try:
|
|
|
|
output.write(" <%s>%s</%s>\n" % (
|
2018-03-17 01:47:36 +08:00
|
|
|
data.lower(),
|
2011-02-16 23:57:50 +08:00
|
|
|
escape(dict.info[data]),
|
2018-03-17 01:47:36 +08:00
|
|
|
data.lower()))
|
|
|
|
except KeyError:
|
2011-05-12 18:19:42 +08:00
|
|
|
self.warning("Header %s lacks a %s description" % (module, data))
|
2018-03-15 17:39:49 +08:00
|
|
|
if 'Description' in dict.info:
|
2011-02-16 23:57:50 +08:00
|
|
|
desc = dict.info['Description']
|
2018-03-15 17:42:44 +08:00
|
|
|
if desc.find("DEPRECATED") != -1:
|
2011-02-16 23:57:50 +08:00
|
|
|
output.write(" <deprecated/>\n")
|
2005-12-02 01:34:21 +08:00
|
|
|
|
2018-03-20 14:48:57 +08:00
|
|
|
for id in uniq(dict.macros.keys()):
|
2011-02-16 23:57:50 +08:00
|
|
|
# Macros are sometime used to masquerade other types.
|
2018-03-15 17:39:49 +08:00
|
|
|
if id in dict.functions:
|
2011-02-16 23:57:50 +08:00
|
|
|
continue
|
2018-03-15 17:39:49 +08:00
|
|
|
if id in dict.variables:
|
2011-02-16 23:57:50 +08:00
|
|
|
continue
|
2018-03-15 17:39:49 +08:00
|
|
|
if id in dict.typedefs:
|
2011-02-16 23:57:50 +08:00
|
|
|
continue
|
2018-03-15 17:39:49 +08:00
|
|
|
if id in dict.structs:
|
2011-02-16 23:57:50 +08:00
|
|
|
continue
|
2018-03-15 17:39:49 +08:00
|
|
|
if id in dict.unions:
|
2011-06-20 11:25:34 +08:00
|
|
|
continue
|
2018-03-15 17:39:49 +08:00
|
|
|
if id in dict.enums:
|
2011-02-16 23:57:50 +08:00
|
|
|
continue
|
|
|
|
output.write(" <exports symbol='%s' type='macro'/>\n" % (id))
|
2018-03-20 14:48:57 +08:00
|
|
|
for id in uniq(dict.enums.keys()):
|
2011-02-16 23:57:50 +08:00
|
|
|
output.write(" <exports symbol='%s' type='enum'/>\n" % (id))
|
2018-03-20 14:48:57 +08:00
|
|
|
for id in uniq(dict.typedefs.keys()):
|
2011-02-16 23:57:50 +08:00
|
|
|
output.write(" <exports symbol='%s' type='typedef'/>\n" % (id))
|
2018-03-20 14:48:57 +08:00
|
|
|
for id in uniq(dict.structs.keys()):
|
2011-02-16 23:57:50 +08:00
|
|
|
output.write(" <exports symbol='%s' type='struct'/>\n" % (id))
|
2018-03-20 14:48:57 +08:00
|
|
|
for id in uniq(dict.variables.keys()):
|
2011-02-16 23:57:50 +08:00
|
|
|
output.write(" <exports symbol='%s' type='variable'/>\n" % (id))
|
2018-03-20 14:48:57 +08:00
|
|
|
for id in uniq(dict.functions.keys()):
|
2011-02-16 23:57:50 +08:00
|
|
|
output.write(" <exports symbol='%s' type='function'/>\n" % (id))
|
|
|
|
output.write(" </file>\n")
|
2005-12-02 01:34:21 +08:00
|
|
|
|
|
|
|
def serialize(self):
|
2011-02-16 22:09:09 +08:00
|
|
|
filename = "%s/%s-api.xml" % (self.path, self.name)
|
2011-05-12 18:19:42 +08:00
|
|
|
if not quiet:
|
2018-03-15 17:30:03 +08:00
|
|
|
print("Saving XML description %s" % (filename))
|
2005-12-02 01:34:21 +08:00
|
|
|
output = open(filename, "w")
|
2020-05-19 19:25:21 +08:00
|
|
|
output.write('<?xml version="1.0" encoding="UTF-8"?>\n')
|
2005-12-02 01:34:21 +08:00
|
|
|
output.write("<api name='%s'>\n" % self.name)
|
|
|
|
output.write(" <files>\n")
|
2018-03-15 17:51:57 +08:00
|
|
|
headers = sorted(self.headers.keys())
|
2005-12-02 01:34:21 +08:00
|
|
|
for file in headers:
|
|
|
|
self.serialize_exports(output, file)
|
|
|
|
output.write(" </files>\n")
|
|
|
|
output.write(" <symbols>\n")
|
2018-03-15 17:51:57 +08:00
|
|
|
macros = sorted(self.idx.macros.keys())
|
2005-12-02 01:34:21 +08:00
|
|
|
for macro in macros:
|
|
|
|
self.serialize_macro(output, macro)
|
2018-03-15 17:51:57 +08:00
|
|
|
enums = sorted(self.idx.enums.keys())
|
2005-12-02 01:34:21 +08:00
|
|
|
for enum in enums:
|
|
|
|
self.serialize_enum(output, enum)
|
2018-03-15 17:51:57 +08:00
|
|
|
typedefs = sorted(self.idx.typedefs.keys())
|
2005-12-02 01:34:21 +08:00
|
|
|
for typedef in typedefs:
|
|
|
|
self.serialize_typedef(output, typedef)
|
2018-03-15 17:51:57 +08:00
|
|
|
variables = sorted(self.idx.variables.keys())
|
2005-12-02 01:34:21 +08:00
|
|
|
for variable in variables:
|
|
|
|
self.serialize_variable(output, variable)
|
2018-03-15 17:51:57 +08:00
|
|
|
functions = sorted(self.idx.functions.keys())
|
2005-12-02 01:34:21 +08:00
|
|
|
for function in functions:
|
|
|
|
self.serialize_function(output, function)
|
|
|
|
output.write(" </symbols>\n")
|
|
|
|
output.write("</api>\n")
|
|
|
|
output.close()
|
|
|
|
|
2013-01-29 22:35:28 +08:00
|
|
|
if self.errors > 0:
|
2018-03-15 17:30:03 +08:00
|
|
|
print("apibuild.py: %d error(s) encountered during generation" % self.errors, file=sys.stderr)
|
2013-01-29 22:35:28 +08:00
|
|
|
sys.exit(3)
|
|
|
|
|
2005-12-02 01:34:21 +08:00
|
|
|
|
2016-04-25 20:26:50 +08:00
|
|
|
class app:
    """Driver: rebuilds the formal API description for each libvirt module."""

    def warning(self, msg):
        """Print a warning message and bump the global warning counter."""
        global warnings
        warnings = warnings + 1
        print(msg)

    def rebuild(self, name, srcdir, builddir):
        """Scan, analyze and serialize the API of module `name`.

        name:     one of "libvirt", "libvirt-qemu", "libvirt-lxc",
                  "libvirt-admin"
        srcdir:   path to the docs source directory
        builddir: path to the docs build directory

        Returns the docBuilder used, or None when the module is unknown
        or the libvirt sources cannot be located.
        """
        syms = {
            "libvirt": srcdir + "/../src/libvirt_public.syms",
            "libvirt-qemu": srcdir + "/../src/libvirt_qemu.syms",
            "libvirt-lxc": srcdir + "/../src/libvirt_lxc.syms",
            "libvirt-admin": srcdir + "/../src/admin/libvirt_admin_public.syms",
        }
        if name not in syms:
            self.warning("rebuild() failed, unknown module %s" % name)
            return None

        builder = None
        # os.path.exists is the direct way to test one literal path;
        # the previous glob.glob(...) != [] did the same thing indirectly.
        if os.path.exists(srcdir + "/../src/libvirt.c"):
            if not quiet:
                print("Rebuilding API description for %s" % name)
            dirs = [srcdir + "/../src",
                    srcdir + "/../src/admin",
                    srcdir + "/../src/util",
                    srcdir + "/../include/libvirt",
                    builddir + "/../include/libvirt"]
            builder = docBuilder(name, syms[name], builddir, dirs, [])
        else:
            self.warning("rebuild() failed, unable to guess the module")
            return None
        builder.scan()
        builder.analyze()
        builder.serialize()
        return builder

    #
    # for debugging the parser
    #
    def parse(self, filename):
        """Parse a single C source file and return its symbol index."""
        parser = CParser(filename)
        idx = parser.parse()
        return idx
|
2005-12-02 01:34:21 +08:00
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="XML API builder")
    parser.add_argument("srcdir", type=str, help="path to docs source dir")
    parser.add_argument("builddir", type=str, help="path to docs build dir")
    parser.add_argument("-d", "--debug", type=str, help="path to source file")

    args = parser.parse_args()

    app = app()

    if args.debug:
        # 'debug' is declared as a bool at the top of the file, so assign
        # True rather than the integer 1.
        debug = True
        app.parse(args.debug)
    else:
        app.rebuild("libvirt", args.srcdir, args.builddir)
        app.rebuild("libvirt-qemu", args.srcdir, args.builddir)
        app.rebuild("libvirt-lxc", args.srcdir, args.builddir)
        app.rebuild("libvirt-admin", args.srcdir, args.builddir)

    # Exit 2 when warnings were emitted so callers (CI) can detect
    # documentation regressions; 0 otherwise.
    sys.exit(2 if warnings > 0 else 0)
|