Merge f4d0cc426f ("Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux") into android-mainline

Steps on the way to 5.15-rc6

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I14a7282d4eb0640786f799e1e20e99454a32d846
commit 8861ab35bc

@@ -1226,7 +1226,7 @@ PAGE_SIZE multiple when read back.

Note that all fields in this file are hierarchical and the
file modified event can be generated due to an event down the
hierarchy. For for the local events at the cgroup level see
hierarchy. For the local events at the cgroup level see
memory.events.local.

low

@@ -2170,19 +2170,19 @@ existing device files.

Cgroup v2 device controller has no interface files and is implemented
on top of cgroup BPF. To control access to device files, a user may
create bpf programs of the BPF_CGROUP_DEVICE type and attach them
to cgroups. On an attempt to access a device file, corresponding
BPF programs will be executed, and depending on the return value
the attempt will succeed or fail with -EPERM.
create bpf programs of type BPF_PROG_TYPE_CGROUP_DEVICE and attach
them to cgroups with BPF_CGROUP_DEVICE flag. On an attempt to access a
device file, corresponding BPF programs will be executed, and depending
on the return value the attempt will succeed or fail with -EPERM.

A BPF_CGROUP_DEVICE program takes a pointer to the bpf_cgroup_dev_ctx
structure, which describes the device access attempt: access type
(mknod/read/write) and device (type, major and minor numbers).
If the program returns 0, the attempt fails with -EPERM, otherwise
it succeeds.
A BPF_PROG_TYPE_CGROUP_DEVICE program takes a pointer to the
bpf_cgroup_dev_ctx structure, which describes the device access attempt:
access type (mknod/read/write) and device (type, major and minor numbers).
If the program returns 0, the attempt fails with -EPERM, otherwise it
succeeds.

An example of BPF_CGROUP_DEVICE program may be found in the kernel
source tree in the tools/testing/selftests/bpf/progs/dev_cgroup.c file.
An example of BPF_PROG_TYPE_CGROUP_DEVICE program may be found in
tools/testing/selftests/bpf/progs/dev_cgroup.c in the kernel source tree.


RDMA

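As a companion to the device-controller text above, here is a minimal sketch of a BPF_PROG_TYPE_CGROUP_DEVICE program in the spirit of the dev_cgroup.c selftest it references. It is illustrative only: the program name and the chosen rule (denying mknod of char device 1:3) are assumptions, not the selftest's verbatim contents.

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* Deny mknod of the 1:3 char device inside the cgroup, allow all else. */
    SEC("cgroup/dev")
    int deny_null_mknod(struct bpf_cgroup_dev_ctx *ctx)
    {
            int type = ctx->access_type & 0xffff;   /* BPF_DEVCG_DEV_* */
            int access = ctx->access_type >> 16;    /* BPF_DEVCG_ACC_* */

            if (type == BPF_DEVCG_DEV_CHAR && (access & BPF_DEVCG_ACC_MKNOD) &&
                ctx->major == 1 && ctx->minor == 3)
                    return 0;       /* rejected: the access fails with -EPERM */

            return 1;               /* allowed */
    }

    char _license[] SEC("license") = "GPL";
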
@@ -9314,7 +9314,7 @@ S: Maintained
F: drivers/platform/x86/intel/atomisp2/led.c

INTEL BIOS SAR INT1092 DRIVER
M: Shravan S <s.shravan@intel.com>
M: Shravan Sudhakar <s.shravan@intel.com>
M: Intel Corporation <linuxwwan@intel.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained

@@ -43,7 +43,7 @@ void __init arm64_hugetlb_cma_reserve(void)
#ifdef CONFIG_ARM64_4K_PAGES
order = PUD_SHIFT - PAGE_SHIFT;
#else
order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT;
order = CONT_PMD_SHIFT - PAGE_SHIFT;
#endif
/*
* HugeTLB CMA reservation is required for gigantic

@@ -36,7 +36,7 @@ struct acpi_gtdt_descriptor {

static struct acpi_gtdt_descriptor acpi_gtdt_desc __initdata;

static inline void *next_platform_timer(void *platform_timer)
static inline __init void *next_platform_timer(void *platform_timer)
{
struct acpi_gtdt_header *gh = platform_timer;

@@ -2,4 +2,4 @@
obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE) += test_async_driver_probe.o

obj-$(CONFIG_DRIVER_PE_KUNIT_TEST) += property-entry-test.o
CFLAGS_REMOVE_property-entry-test.o += -fplugin-arg-structleak_plugin-byref -fplugin-arg-structleak_plugin-byref-all
CFLAGS_property-entry-test.o += $(DISABLE_STRUCTLEAK_PLUGIN)

@@ -5,3 +5,4 @@

# Keep in alphabetical order
obj-$(CONFIG_IIO_TEST_FORMAT) += iio-test-format.o
CFLAGS_iio-test-format.o += $(DISABLE_STRUCTLEAK_PLUGIN)

@@ -98,7 +98,7 @@ mlxreg_io_get_reg(void *regmap, struct mlxreg_core_data *data, u32 in_val,
if (ret)
goto access_error;

*regval |= rol32(val, regsize * i);
*regval |= rol32(val, regsize * i * 8);
}
}

@@ -141,7 +141,7 @@ mlxreg_io_attr_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;

/* Convert buffer to input value. */
ret = kstrtou32(buf, len, &input_val);
ret = kstrtou32(buf, 0, &input_val);
if (ret)
return ret;

@@ -476,6 +476,7 @@ static const struct acpi_device_id amd_pmc_acpi_ids[] = {
{"AMDI0006", 0},
{"AMDI0007", 0},
{"AMD0004", 0},
{"AMD0005", 0},
{ }
};
MODULE_DEVICE_TABLE(acpi, amd_pmc_acpi_ids);

@@ -167,6 +167,7 @@ config DELL_WMI
config DELL_WMI_PRIVACY
bool "Dell WMI Hardware Privacy Support"
depends on LEDS_TRIGGER_AUDIO = y || DELL_WMI = LEDS_TRIGGER_AUDIO
depends on DELL_WMI
help
This option adds integration with the "Dell Hardware Privacy"
feature of Dell laptops to the dell-wmi driver.

@@ -141,6 +141,7 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)

static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE AX V2"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE V2"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"),

@@ -42,12 +42,20 @@ static void update_sar_data(struct wwan_sar_context *context)

if (config->device_mode_info &&
context->sar_data.device_mode < config->total_dev_mode) {
struct wwan_device_mode_info *dev_mode =
&config->device_mode_info[context->sar_data.device_mode];
int itr = 0;

context->sar_data.antennatable_index = dev_mode->antennatable_index;
context->sar_data.bandtable_index = dev_mode->bandtable_index;
context->sar_data.sartable_index = dev_mode->sartable_index;
for (itr = 0; itr < config->total_dev_mode; itr++) {
if (context->sar_data.device_mode ==
config->device_mode_info[itr].device_mode) {
struct wwan_device_mode_info *dev_mode =
&config->device_mode_info[itr];

context->sar_data.antennatable_index = dev_mode->antennatable_index;
context->sar_data.bandtable_index = dev_mode->bandtable_index;
context->sar_data.sartable_index = dev_mode->sartable_index;
break;
}
}
}
}

@@ -305,7 +313,6 @@ static struct platform_driver sar_driver = {
.remove = sar_remove,
.driver = {
.name = DRVNAME,
.owner = THIS_MODULE,
.acpi_match_table = ACPI_PTR(sar_device_ids)
}
};

@@ -313,4 +320,4 @@ module_platform_driver(sar_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Platform device driver for INTEL MODEM BIOS SAR");
MODULE_AUTHOR("Shravan S <s.shravan@intel.com>");
MODULE_AUTHOR("Shravan Sudhakar <s.shravan@intel.com>");

@@ -401,7 +401,7 @@ int skl_int3472_discrete_remove(struct platform_device *pdev)

gpiod_remove_lookup_table(&int3472->gpios);

if (int3472->clock.ena_gpio)
if (int3472->clock.cl)
skl_int3472_unregister_clock(int3472);

gpiod_put(int3472->clock.ena_gpio);

@@ -75,7 +75,7 @@ struct intel_scu_ipc_dev {
#define IPC_READ_BUFFER 0x90

/* Timeout in jiffies */
#define IPC_TIMEOUT (5 * HZ)
#define IPC_TIMEOUT (10 * HZ)

static struct intel_scu_ipc_dev *ipcdev; /* Only one for now */
static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */

@@ -232,7 +232,7 @@ static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
/* Wait till scu status is busy */
static inline int busy_loop(struct intel_scu_ipc_dev *scu)
{
unsigned long end = jiffies + msecs_to_jiffies(IPC_TIMEOUT);
unsigned long end = jiffies + IPC_TIMEOUT;

do {
u32 status;

@@ -247,7 +247,7 @@ static inline int busy_loop(struct intel_scu_ipc_dev *scu)
return -ETIMEDOUT;
}

/* Wait till ipc ioc interrupt is received or timeout in 3 HZ */
/* Wait till ipc ioc interrupt is received or timeout in 10 HZ */
static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
{
int status;

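The busy_loop() change above works because IPC_TIMEOUT is already expressed in jiffies, so it must not be passed through msecs_to_jiffies() a second time. The polling idiom, as a hedged sketch with a made-up status helper:

    unsigned long end = jiffies + IPC_TIMEOUT;      /* IPC_TIMEOUT is in jiffies */

    do {
            if (!scu_status_busy(scu))              /* hypothetical register check */
                    return 0;
            usleep_range(50, 100);
    } while (time_before(jiffies, end));

    return -ETIMEDOUT;
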
@@ -7,6 +7,7 @@ thunderbolt-objs += usb4_port.o nvm.o retimer.o quirks.o
thunderbolt-${CONFIG_ACPI} += acpi.o
thunderbolt-$(CONFIG_DEBUG_FS) += debugfs.o
thunderbolt-${CONFIG_USB4_KUNIT_TEST} += test.o
CFLAGS_test.o += $(DISABLE_STRUCTLEAK_PLUGIN)

thunderbolt_dma_test-${CONFIG_USB4_DMA_TEST} += dma_test.o
obj-$(CONFIG_USB4_DMA_TEST) += thunderbolt_dma_test.o

@@ -3030,7 +3030,7 @@ struct btrfs_dir_item *
btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, u64 dir,
u64 objectid, const char *name, int name_len,
u64 index, const char *name, int name_len,
int mod);
struct btrfs_dir_item *
btrfs_search_dir_index_item(struct btrfs_root *root,

@@ -190,9 +190,20 @@ static struct btrfs_dir_item *btrfs_lookup_match_dir(
}

/*
* lookup a directory item based on name. 'dir' is the objectid
* we're searching in, and 'mod' tells us if you plan on deleting the
* item (use mod < 0) or changing the options (use mod > 0)
* Lookup for a directory item by name.
*
* @trans: The transaction handle to use. Can be NULL if @mod is 0.
* @root: The root of the target tree.
* @path: Path to use for the search.
* @dir: The inode number (objectid) of the directory.
* @name: The name associated to the directory entry we are looking for.
* @name_len: The length of the name.
* @mod: Used to indicate if the tree search is meant for a read only
* lookup, for a modification lookup or for a deletion lookup, so
* its value should be 0, 1 or -1, respectively.
*
* Returns: NULL if the dir item does not exists, an error pointer if an error
* happened, or a pointer to a dir item if a dir item exists for the given name.
*/
struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,

@@ -273,27 +284,42 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
}

/*
* lookup a directory item based on index. 'dir' is the objectid
* we're searching in, and 'mod' tells us if you plan on deleting the
* item (use mod < 0) or changing the options (use mod > 0)
* Lookup for a directory index item by name and index number.
*
* The name is used to make sure the index really points to the name you were
* looking for.
* @trans: The transaction handle to use. Can be NULL if @mod is 0.
* @root: The root of the target tree.
* @path: Path to use for the search.
* @dir: The inode number (objectid) of the directory.
* @index: The index number.
* @name: The name associated to the directory entry we are looking for.
* @name_len: The length of the name.
* @mod: Used to indicate if the tree search is meant for a read only
* lookup, for a modification lookup or for a deletion lookup, so
* its value should be 0, 1 or -1, respectively.
*
* Returns: NULL if the dir index item does not exists, an error pointer if an
* error happened, or a pointer to a dir item if the dir index item exists and
* matches the criteria (name and index number).
*/
struct btrfs_dir_item *
btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, u64 dir,
u64 objectid, const char *name, int name_len,
u64 index, const char *name, int name_len,
int mod)
{
struct btrfs_dir_item *di;
struct btrfs_key key;

key.objectid = dir;
key.type = BTRFS_DIR_INDEX_KEY;
key.offset = objectid;
key.offset = index;

return btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
di = btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
if (di == ERR_PTR(-ENOENT))
return NULL;

return di;
}

struct btrfs_dir_item *

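Given the kernel-doc added above (NULL when nothing was found, an ERR_PTR on failure, otherwise a valid item), a caller now distinguishes the three cases roughly as in this hypothetical sketch; dir_ino and the -ENOENT choice are illustrative, not taken from the patch:

    struct btrfs_dir_item *di;

    di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, name_len, 0);
    if (IS_ERR(di))
            return PTR_ERR(di);     /* real lookup failure */
    if (!di)
            return -ENOENT;         /* no such directory entry */
    /* ... use di, then btrfs_release_path(path) when done ... */
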
@@ -4859,6 +4859,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
out_free_delayed:
btrfs_free_delayed_extent_op(extent_op);
out_free_buf:
btrfs_tree_unlock(buf);
free_extent_buffer(buf);
out_free_reserved:
btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);

@@ -734,8 +734,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
if (args->start >= inode->disk_i_size && !args->replace_extent)
modify_tree = 0;

update_refs = (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
root == fs_info->tree_root);
update_refs = (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID);
while (1) {
recow = 0;
ret = btrfs_lookup_file_extent(trans, root, path, ino,

@@ -2704,14 +2703,16 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
drop_args.bytes_found);
if (ret != -ENOSPC) {
/*
* When cloning we want to avoid transaction aborts when
* nothing was done and we are attempting to clone parts
* of inline extents, in such cases -EOPNOTSUPP is
* returned by __btrfs_drop_extents() without having
* changed anything in the file.
* The only time we don't want to abort is if we are
* attempting to clone a partial inline extent, in which
* case we'll get EOPNOTSUPP. However if we aren't
* clone we need to abort no matter what, because if we
* got EOPNOTSUPP via prealloc then we messed up and
* need to abort.
*/
if (extent_info && !extent_info->is_new_extent &&
ret && ret != -EOPNOTSUPP)
if (ret &&
(ret != -EOPNOTSUPP ||
(extent_info && extent_info->is_new_extent)))
btrfs_abort_transaction(trans, ret);
break;
}

@@ -939,9 +939,11 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
}

/*
* helper function to see if a given name and sequence number found
* in an inode back reference are already in a directory and correctly
* point to this inode
* See if a given name and sequence number found in an inode back reference are
* already in a directory and correctly point to this inode.
*
* Returns: < 0 on error, 0 if the directory entry does not exists and 1 if it
* exists.
*/
static noinline int inode_in_dir(struct btrfs_root *root,
struct btrfs_path *path,

@@ -950,29 +952,34 @@ static noinline int inode_in_dir(struct btrfs_root *root,
{
struct btrfs_dir_item *di;
struct btrfs_key location;
int match = 0;
int ret = 0;

di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
index, name, name_len, 0);
if (di && !IS_ERR(di)) {
if (IS_ERR(di)) {
ret = PTR_ERR(di);
goto out;
} else if (di) {
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
if (location.objectid != objectid)
goto out;
} else
} else {
goto out;
btrfs_release_path(path);
}

btrfs_release_path(path);
di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
if (di && !IS_ERR(di)) {
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
if (location.objectid != objectid)
goto out;
} else
if (IS_ERR(di)) {
ret = PTR_ERR(di);
goto out;
match = 1;
} else if (di) {
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
if (location.objectid == objectid)
ret = 1;
}
out:
btrfs_release_path(path);
return match;
return ret;
}

/*

@@ -1182,7 +1189,9 @@ static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
/* look for a conflicting sequence number */
di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
ref_index, name, namelen, 0);
if (di && !IS_ERR(di)) {
if (IS_ERR(di)) {
return PTR_ERR(di);
} else if (di) {
ret = drop_one_dir_item(trans, root, path, dir, di);
if (ret)
return ret;

@@ -1192,7 +1201,9 @@ static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
/* look for a conflicting name */
di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
name, namelen, 0);
if (di && !IS_ERR(di)) {
if (IS_ERR(di)) {
return PTR_ERR(di);
} else if (di) {
ret = drop_one_dir_item(trans, root, path, dir, di);
if (ret)
return ret;

@@ -1517,10 +1528,12 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
if (ret)
goto out;

/* if we already have a perfect match, we're done */
if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
btrfs_ino(BTRFS_I(inode)), ref_index,
name, namelen)) {
ret = inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
btrfs_ino(BTRFS_I(inode)), ref_index,
name, namelen);
if (ret < 0) {
goto out;
} else if (ret == 0) {
/*
* look for a conflicting back reference in the
* metadata. if we find one we have to unlink that name

@@ -1580,6 +1593,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
if (ret)
goto out;
}
/* Else, ret == 1, we already have a perfect match, we're done. */

ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
kfree(name);

@@ -1936,8 +1950,8 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
struct btrfs_key log_key;
struct inode *dir;
u8 log_type;
int exists;
int ret = 0;
bool exists;
int ret;
bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
bool name_added = false;

@@ -1957,12 +1971,12 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
name_len);

btrfs_dir_item_key_to_cpu(eb, di, &log_key);
exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
if (exists == 0)
exists = 1;
else
exists = 0;
ret = btrfs_lookup_inode(trans, root, path, &log_key, 0);
btrfs_release_path(path);
if (ret < 0)
goto out;
exists = (ret == 0);
ret = 0;

if (key->type == BTRFS_DIR_ITEM_KEY) {
dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,

@@ -1977,7 +1991,11 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
ret = -EINVAL;
goto out;
}
if (IS_ERR_OR_NULL(dst_di)) {

if (IS_ERR(dst_di)) {
ret = PTR_ERR(dst_di);
goto out;
} else if (!dst_di) {
/* we need a sequence number to insert, so we only
* do inserts for the BTRFS_DIR_INDEX_KEY types
*/

@@ -2281,7 +2299,7 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
dir_key->offset,
name, name_len, 0);
}
if (!log_di || log_di == ERR_PTR(-ENOENT)) {
if (!log_di) {
btrfs_dir_item_key_to_cpu(eb, di, &location);
btrfs_release_path(path);
btrfs_release_path(log_path);

@@ -3540,8 +3558,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
if (err == -ENOSPC) {
btrfs_set_log_full_commit(trans);
err = 0;
} else if (err < 0 && err != -ENOENT) {
/* ENOENT can be returned if the entry hasn't been fsynced yet */
} else if (err < 0) {
btrfs_abort_transaction(trans, err);
}

@@ -613,7 +613,7 @@ void kunit_remove_resource(struct kunit *test, struct kunit_resource *res);
* and is automatically cleaned up after the test case concludes. See &struct
* kunit_resource for more information.
*/
void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t flags);
void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp);

/**
* kunit_kmalloc() - Like kmalloc() except the allocation is *test managed*.

@@ -657,9 +657,9 @@ static inline void *kunit_kzalloc(struct kunit *test, size_t size, gfp_t gfp)
*
* See kcalloc() and kunit_kmalloc_array() for more information.
*/
static inline void *kunit_kcalloc(struct kunit *test, size_t n, size_t size, gfp_t flags)
static inline void *kunit_kcalloc(struct kunit *test, size_t n, size_t size, gfp_t gfp)
{
return kunit_kmalloc_array(test, n, size, flags | __GFP_ZERO);
return kunit_kmalloc_array(test, n, size, gfp | __GFP_ZERO);
}

void kunit_cleanup(struct kunit *test);

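For reference, a minimal KUnit test case using kunit_kcalloc() with the renamed gfp parameter; the test function name is made up, and the allocation is freed automatically when the test case finishes:

    #include <kunit/test.h>

    static void example_alloc_test(struct kunit *test)
    {
            int *buf = kunit_kcalloc(test, 16, sizeof(*buf), GFP_KERNEL);

            KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
            KUNIT_EXPECT_EQ(test, buf[0], 0);   /* kcalloc-style zeroing */
    }
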
@@ -399,9 +399,8 @@ extern struct workqueue_struct *system_freezable_power_efficient_wq;
* RETURNS:
* Pointer to the allocated workqueue on success, %NULL on failure.
*/
struct workqueue_struct *alloc_workqueue(const char *fmt,
unsigned int flags,
int max_active, ...);
__printf(1, 4) struct workqueue_struct *
alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);

/**
* alloc_ordered_workqueue - allocate an ordered workqueue

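The __printf(1, 4) annotation lets the compiler check the trailing varargs against the format string. A typical call site looks roughly like this (instance_id is a made-up example argument):

    struct workqueue_struct *wq;

    wq = alloc_workqueue("mydrv_wq_%d", WQ_UNBOUND | WQ_MEM_RECLAIM, 0, instance_id);
    if (!wq)
            return -ENOMEM;
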
@@ -314,17 +314,19 @@ static struct cpuset top_cpuset = {
if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))

/*
* There are two global locks guarding cpuset structures - cpuset_mutex and
* There are two global locks guarding cpuset structures - cpuset_rwsem and
* callback_lock. We also require taking task_lock() when dereferencing a
* task's cpuset pointer. See "The task_lock() exception", at the end of this
* comment.
* comment. The cpuset code uses only cpuset_rwsem write lock. Other
* kernel subsystems can use cpuset_read_lock()/cpuset_read_unlock() to
* prevent change to cpuset structures.
*
* A task must hold both locks to modify cpusets. If a task holds
* cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
* cpuset_rwsem, it blocks others wanting that rwsem, ensuring that it
* is the only task able to also acquire callback_lock and be able to
* modify cpusets. It can perform various checks on the cpuset structure
* first, knowing nothing will change. It can also allocate memory while
* just holding cpuset_mutex. While it is performing these checks, various
* just holding cpuset_rwsem. While it is performing these checks, various
* callback routines can briefly acquire callback_lock to query cpusets.
* Once it is ready to make the changes, it takes callback_lock, blocking
* everyone else.

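A rough sketch of the read-side usage the updated comment describes for other kernel subsystems; the function here is hypothetical, and only cpuset_read_lock()/cpuset_read_unlock() come from the comment above:

    static void inspect_cpuset_state(void)
    {
            cpuset_read_lock();
            /* read state that must stay consistent with the cpuset configuration */
            cpuset_read_unlock();
    }
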
@@ -385,7 +387,7 @@ static inline bool is_in_v2_mode(void)
* One way or another, we guarantee to return some non-empty subset
* of cpu_online_mask.
*
* Call with callback_lock or cpuset_mutex held.
* Call with callback_lock or cpuset_rwsem held.
*/
static void guarantee_online_cpus(struct task_struct *tsk,
struct cpumask *pmask)

@@ -427,7 +429,7 @@ static void guarantee_online_cpus(struct task_struct *tsk,
* One way or another, we guarantee to return some non-empty subset
* of node_states[N_MEMORY].
*
* Call with callback_lock or cpuset_mutex held.
* Call with callback_lock or cpuset_rwsem held.
*/
static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
{

@@ -439,7 +441,7 @@ static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
/*
* update task's spread flag if cpuset's page/slab spread flag is set
*
* Call with callback_lock or cpuset_mutex held.
* Call with callback_lock or cpuset_rwsem held.
*/
static void cpuset_update_task_spread_flag(struct cpuset *cs,
struct task_struct *tsk)

@@ -460,7 +462,7 @@ static void cpuset_update_task_spread_flag(struct cpuset *cs,
*
* One cpuset is a subset of another if all its allowed CPUs and
* Memory Nodes are a subset of the other, and its exclusive flags
* are only set if the other's are set. Call holding cpuset_mutex.
* are only set if the other's are set. Call holding cpuset_rwsem.
*/

static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)

@@ -576,7 +578,7 @@ static inline void free_cpuset(struct cpuset *cs)
* If we replaced the flag and mask values of the current cpuset
* (cur) with those values in the trial cpuset (trial), would
* our various subset and exclusive rules still be valid? Presumes
* cpuset_mutex held.
* cpuset_rwsem held.
*
* 'cur' is the address of an actual, in-use cpuset. Operations
* such as list traversal that depend on the actual address of the

@@ -699,7 +701,7 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
rcu_read_unlock();
}

/* Must be called with cpuset_mutex held. */
/* Must be called with cpuset_rwsem held. */
static inline int nr_cpusets(void)
{
/* jump label reference count + the top-level cpuset */

@@ -725,7 +727,7 @@ static inline int nr_cpusets(void)
* domains when operating in the severe memory shortage situations
* that could cause allocation failures below.
*
* Must be called with cpuset_mutex held.
* Must be called with cpuset_rwsem held.
*
* The three key local variables below are:
* cp - cpuset pointer, used (together with pos_css) to perform a

@@ -1004,7 +1006,7 @@ partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
* 'cpus' is removed, then call this routine to rebuild the
* scheduler's dynamic sched domains.
*
* Call with cpuset_mutex held. Takes cpus_read_lock().
* Call with cpuset_rwsem held. Takes cpus_read_lock().
*/
static void rebuild_sched_domains_locked(void)
{

@@ -1089,7 +1091,7 @@ static int update_cpus_allowed(struct cpuset *cs, struct task_struct *p,
* @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
*
* Iterate through each task of @cs updating its cpus_allowed to the
* effective cpuset's. As this function is called with cpuset_mutex held,
* effective cpuset's. As this function is called with cpuset_rwsem held,
* cpuset membership stays stable.
*/
static void update_tasks_cpumask(struct cpuset *cs)

@@ -1358,7 +1360,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
*
* On legacy hierarchy, effective_cpus will be the same with cpu_allowed.
*
* Called with cpuset_mutex held
* Called with cpuset_rwsem held
*/
static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
{

@@ -1717,12 +1719,12 @@ static void *cpuset_being_rebound;
* @cs: the cpuset in which each task's mems_allowed mask needs to be changed
*
* Iterate through each task of @cs updating its mems_allowed to the
* effective cpuset's. As this function is called with cpuset_mutex held,
* effective cpuset's. As this function is called with cpuset_rwsem held,
* cpuset membership stays stable.
*/
static void update_tasks_nodemask(struct cpuset *cs)
{
static nodemask_t newmems; /* protected by cpuset_mutex */
static nodemask_t newmems; /* protected by cpuset_rwsem */
struct css_task_iter it;
struct task_struct *task;

@@ -1735,7 +1737,7 @@ static void update_tasks_nodemask(struct cpuset *cs)
* take while holding tasklist_lock. Forks can happen - the
* mpol_dup() cpuset_being_rebound check will catch such forks,
* and rebind their vma mempolicies too. Because we still hold
* the global cpuset_mutex, we know that no other rebind effort
* the global cpuset_rwsem, we know that no other rebind effort
* will be contending for the global variable cpuset_being_rebound.
* It's ok if we rebind the same mm twice; mpol_rebind_mm()
* is idempotent. Also migrate pages in each mm to new nodes.

@@ -1781,7 +1783,7 @@ static void update_tasks_nodemask(struct cpuset *cs)
*
* On legacy hierarchy, effective_mems will be the same with mems_allowed.
*
* Called with cpuset_mutex held
* Called with cpuset_rwsem held
*/
static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
{

@@ -1834,7 +1836,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
* mempolicies and if the cpuset is marked 'memory_migrate',
* migrate the tasks pages to the new memory.
*
* Call with cpuset_mutex held. May take callback_lock during call.
* Call with cpuset_rwsem held. May take callback_lock during call.
* Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
* lock each such tasks mm->mmap_lock, scan its vma's and rebind
* their mempolicies to the cpusets new mems_allowed.

@@ -1924,7 +1926,7 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
* @cs: the cpuset in which each task's spread flags needs to be changed
*
* Iterate through each task of @cs updating its spread flags. As this
* function is called with cpuset_mutex held, cpuset membership stays
* function is called with cpuset_rwsem held, cpuset membership stays
* stable.
*/
static void update_tasks_flags(struct cpuset *cs)

@@ -1944,7 +1946,7 @@ static void update_tasks_flags(struct cpuset *cs)
* cs: the cpuset to update
* turning_on: whether the flag is being set or cleared
*
* Call with cpuset_mutex held.
* Call with cpuset_rwsem held.
*/

static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,

@@ -1993,7 +1995,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
* cs: the cpuset to update
* new_prs: new partition root state
*
* Call with cpuset_mutex held.
* Call with cpuset_rwsem held.
*/
static int update_prstate(struct cpuset *cs, int new_prs)
{

@@ -2180,7 +2182,7 @@ static int fmeter_getrate(struct fmeter *fmp)

static struct cpuset *cpuset_attach_old_cs;

/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
/* Called by cgroups to determine if a cpuset is usable; cpuset_rwsem held */
static int cpuset_can_attach(struct cgroup_taskset *tset)
{
struct cgroup_subsys_state *css;

@@ -2232,7 +2234,7 @@ static void cpuset_cancel_attach(struct cgroup_taskset *tset)
}

/*
* Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach()
* Protected by cpuset_rwsem. cpus_attach is used only by cpuset_attach()
* but we can't allocate it dynamically there. Define it global and
* allocate from cpuset_init().
*/

@@ -2240,7 +2242,7 @@ static cpumask_var_t cpus_attach;

static void cpuset_attach(struct cgroup_taskset *tset)
{
/* static buf protected by cpuset_mutex */
/* static buf protected by cpuset_rwsem */
static nodemask_t cpuset_attach_nodemask_to;
struct task_struct *task;
struct task_struct *leader;

@@ -2430,7 +2432,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
* operation like this one can lead to a deadlock through kernfs
* active_ref protection. Let's break the protection. Losing the
* protection is okay as we check whether @cs is online after
* grabbing cpuset_mutex anyway. This only happens on the legacy
* grabbing cpuset_rwsem anyway. This only happens on the legacy
* hierarchies.
*/
css_get(&cs->css);

@@ -3688,7 +3690,7 @@ void __cpuset_memory_pressure_bump(void)
* - Used for /proc/<pid>/cpuset.
* - No need to task_lock(tsk) on this tsk->cpuset reference, as it
* doesn't really matter if tsk->cpuset changes after we read it,
* and we take cpuset_mutex, keeping cpuset_attach() from changing it
* and we take cpuset_rwsem, keeping cpuset_attach() from changing it
* anyway.
*/
int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,

@@ -4834,8 +4834,16 @@ void show_workqueue_state(void)

for_each_pwq(pwq, wq) {
raw_spin_lock_irqsave(&pwq->pool->lock, flags);
if (pwq->nr_active || !list_empty(&pwq->inactive_works))
if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
/*
* Defer printing to avoid deadlocks in console
* drivers that queue work while holding locks
* also taken in their write paths.
*/
printk_deferred_enter();
show_pwq(pwq);
printk_deferred_exit();
}
raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
/*
* We could be printing a lot from atomic context, e.g.

@@ -4853,7 +4861,12 @@ void show_workqueue_state(void)
raw_spin_lock_irqsave(&pool->lock, flags);
if (pool->nr_workers == pool->nr_idle)
goto next_pool;

/*
* Defer printing to avoid deadlocks in console drivers that
* queue work while holding locks also taken in their write
* paths.
*/
printk_deferred_enter();
pr_info("pool %d:", pool->id);
pr_cont_pool_info(pool);
pr_cont(" hung=%us workers=%d",

@@ -4868,6 +4881,7 @@ void show_workqueue_state(void)
first = false;
}
pr_cont("\n");
printk_deferred_exit();
next_pool:
raw_spin_unlock_irqrestore(&pool->lock, flags);
/*

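The hunks above wrap the dump in printk_deferred_enter()/printk_deferred_exit() so that console drivers which queue work cannot deadlock on pool->lock while the state is printed. Reduced to its shape, the pattern is roughly:

    raw_spin_lock_irqsave(&pool->lock, flags);
    printk_deferred_enter();        /* console output is buffered from here on */
    pr_info("dumping state while pool->lock is held\n");
    printk_deferred_exit();         /* flushed once it is safe to hit the consoles */
    raw_spin_unlock_irqrestore(&pool->lock, flags);
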
@@ -351,7 +351,7 @@ obj-$(CONFIG_OBJAGG) += objagg.o
obj-$(CONFIG_PLDMFW) += pldmfw/

# KUnit tests
CFLAGS_bitfield_kunit.o := $(call cc-option,-Wframe-larger-than=10240)
CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN)
obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o

@@ -116,8 +116,8 @@ static void kfree_at_end(struct kunit *test, const void *to_free)
/* kfree() handles NULL already, but avoid allocating a no-op cleanup. */
if (IS_ERR_OR_NULL(to_free))
return;
kunit_alloc_and_get_resource(test, NULL, kfree_res_free, GFP_KERNEL,
(void *)to_free);
kunit_alloc_resource(test, NULL, kfree_res_free, GFP_KERNEL,
(void *)to_free);
}

static struct kunit_suite *alloc_fake_suite(struct kunit *test,

@@ -19,6 +19,10 @@ gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF) \
+= -fplugin-arg-structleak_plugin-byref
gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL) \
+= -fplugin-arg-structleak_plugin-byref-all
ifdef CONFIG_GCC_PLUGIN_STRUCTLEAK
DISABLE_STRUCTLEAK_PLUGIN += -fplugin-arg-structleak_plugin-disable
endif
export DISABLE_STRUCTLEAK_PLUGIN
gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK) \
+= -DSTRUCTLEAK_PLUGIN

@@ -16,7 +16,7 @@ assert sys.version_info >= (3, 7), "Python version is too old"

from collections import namedtuple
from enum import Enum, auto
from typing import Iterable
from typing import Iterable, Sequence

import kunit_config
import kunit_json

@@ -186,6 +186,26 @@ def run_tests(linux: kunit_kernel.LinuxSourceTree,
exec_result.elapsed_time))
return parse_result

# Problem:
# $ kunit.py run --json
# works as one would expect and prints the parsed test results as JSON.
# $ kunit.py run --json suite_name
# would *not* pass suite_name as the filter_glob and print as json.
# argparse will consider it to be another way of writing
# $ kunit.py run --json=suite_name
# i.e. it would run all tests, and dump the json to a `suite_name` file.
# So we hackily automatically rewrite --json => --json=stdout
pseudo_bool_flag_defaults = {
'--json': 'stdout',
'--raw_output': 'kunit',
}
def massage_argv(argv: Sequence[str]) -> Sequence[str]:
def massage_arg(arg: str) -> str:
if arg not in pseudo_bool_flag_defaults:
return arg
return f'{arg}={pseudo_bool_flag_defaults[arg]}'
return list(map(massage_arg, argv))

def add_common_opts(parser) -> None:
parser.add_argument('--build_dir',
help='As in the make command, it specifies the build '

@@ -303,7 +323,7 @@ def main(argv, linux=None):
help='Specifies the file to read results from.',
type=str, nargs='?', metavar='input_file')

cli_args = parser.parse_args(argv)
cli_args = parser.parse_args(massage_argv(argv))

if get_kernel_root_path():
os.chdir(get_kernel_root_path())

@@ -408,6 +408,14 @@ class KUnitMainTest(unittest.TestCase):
self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))

def test_run_raw_output_does_not_take_positional_args(self):
# --raw_output is a string flag, but we don't want it to consume
# any positional arguments, only ones after an '='
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
kunit.main(['run', '--raw_output', 'filter_glob'], self.linux_source_mock)
self.linux_source_mock.run_kernel.assert_called_once_with(
args=None, build_dir='.kunit', filter_glob='filter_glob', timeout=300)

def test_exec_timeout(self):
timeout = 3453
kunit.main(['exec', '--timeout', str(timeout)], self.linux_source_mock)