Merge 0e9bcda5d2 ("Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux") into android-mainline

Fixes along the way to 5.11-rc6.

Change-Id: I96948e314f1b11074f5b86cdf45a37af7084eabd
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit da8c4ad2d7
@@ -247,9 +247,11 @@ static inline const void *__tag_set(const void *addr, u8 tag)

/*
* The linear kernel range starts at the bottom of the virtual address space.
* Check whether an arbitrary address is within the linear map, which
* lives in the [PAGE_OFFSET, PAGE_END) interval at the bottom of the
* kernel's TTBR1 address range.
*/
#define __is_lm_address(addr) (((u64)(addr) & ~PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))
#define __is_lm_address(addr) (((u64)(addr) ^ PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))

#define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
#define __kimg_to_phys(addr) ((addr) - kimage_voffset)
@@ -1016,6 +1016,8 @@ static void blkcg_css_offline(struct cgroup_subsys_state *css)
*/
void blkcg_destroy_blkgs(struct blkcg *blkcg)
{
might_sleep();

spin_lock_irq(&blkcg->lock);

while (!hlist_empty(&blkcg->blkg_list)) {
@@ -1023,14 +1025,20 @@ void blkcg_destroy_blkgs(struct blkcg *blkcg)
struct blkcg_gq, blkcg_node);
struct request_queue *q = blkg->q;

if (spin_trylock(&q->queue_lock)) {
blkg_destroy(blkg);
spin_unlock(&q->queue_lock);
} else {
if (need_resched() || !spin_trylock(&q->queue_lock)) {
/*
* Given that the system can accumulate a huge number
* of blkgs in pathological cases, check to see if we
* need to rescheduling to avoid softlockup.
*/
spin_unlock_irq(&blkcg->lock);
cpu_relax();
cond_resched();
spin_lock_irq(&blkcg->lock);
continue;
}

blkg_destroy(blkg);
spin_unlock(&q->queue_lock);
}

spin_unlock_irq(&blkcg->lock);
@@ -304,7 +304,7 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
struct request_queue *q = hctx->queue;
struct blk_mq_tag_set *set = q->tag_set;

if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &q->queue_flags))
if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
return true;
users = atomic_read(&set->active_queues_shared_sbitmap);
} else {
@@ -45,10 +45,11 @@ static void disk_release_events(struct gendisk *disk);
void set_capacity(struct gendisk *disk, sector_t sectors)
{
struct block_device *bdev = disk->part0;
unsigned long flags;

spin_lock(&bdev->bd_size_lock);
spin_lock_irqsave(&bdev->bd_size_lock, flags);
i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
spin_unlock(&bdev->bd_size_lock);
spin_unlock_irqrestore(&bdev->bd_size_lock, flags);
}
EXPORT_SYMBOL(set_capacity);
@@ -88,9 +88,11 @@ static int (*check_part[])(struct parsed_partitions *) = {

static void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
{
spin_lock(&bdev->bd_size_lock);
unsigned long flags;

spin_lock_irqsave(&bdev->bd_size_lock, flags);
i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
spin_unlock(&bdev->bd_size_lock);
spin_unlock_irqrestore(&bdev->bd_size_lock, flags);
}

static struct parsed_partitions *allocate_partitions(struct gendisk *hd)
@@ -384,7 +386,7 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,

err = blk_alloc_devt(bdev, &devt);
if (err)
goto out_bdput;
goto out_put;
pdev->devt = devt;

/* delay uevent until 'holders' subdir is created */
@@ -1107,6 +1107,11 @@ static int nc_dma_get_range(struct device *dev, u64 *size)

ncomp = (struct acpi_iort_named_component *)node->node_data;

if (!ncomp->memory_address_limit) {
pr_warn(FW_BUG "Named component missing memory address limit\n");
return -EINVAL;
}

*size = ncomp->memory_address_limit >= 64 ? U64_MAX :
1ULL<<ncomp->memory_address_limit;
@@ -1126,6 +1131,11 @@ static int rc_dma_get_range(struct device *dev, u64 *size)

rc = (struct acpi_iort_root_complex *)node->node_data;

if (!rc->memory_address_limit) {
pr_warn(FW_BUG "Root complex missing memory address limit\n");
return -EINVAL;
}

*size = rc->memory_address_limit >= 64 ? U64_MAX :
1ULL<<rc->memory_address_limit;
@@ -1173,8 +1183,8 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
end = dmaaddr + size - 1;
mask = DMA_BIT_MASK(ilog2(end) + 1);
dev->bus_dma_limit = end;
dev->coherent_dma_mask = mask;
*dev->dma_mask = mask;
dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask);
*dev->dma_mask = min(*dev->dma_mask, mask);
}

*dma_addr = dmaaddr;
@@ -251,20 +251,12 @@ int __acpi_device_uevent_modalias(struct acpi_device *adev,
if (add_uevent_var(env, "MODALIAS="))
return -ENOMEM;

len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
sizeof(env->buf) - env->buflen);
if (len < 0)
return len;

env->buflen += len;
if (!adev->data.of_compatible)
return 0;

if (len > 0 && add_uevent_var(env, "MODALIAS="))
return -ENOMEM;

len = create_of_modalias(adev, &env->buf[env->buflen - 1],
sizeof(env->buf) - env->buflen);
if (adev->data.of_compatible)
len = create_of_modalias(adev, &env->buf[env->buflen - 1],
sizeof(env->buf) - env->buflen);
else
len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
sizeof(env->buf) - env->buflen);
if (len < 0)
return len;
@ -174,6 +174,8 @@ struct acpi_thermal {
|
|||
struct thermal_zone_device *thermal_zone;
|
||||
int kelvin_offset; /* in millidegrees */
|
||||
struct work_struct thermal_check_work;
|
||||
struct mutex thermal_check_lock;
|
||||
refcount_t thermal_check_count;
|
||||
};
|
||||
|
||||
/* --------------------------------------------------------------------------
|
||||
|
@ -495,14 +497,6 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void acpi_thermal_check(void *data)
|
||||
{
|
||||
struct acpi_thermal *tz = data;
|
||||
|
||||
thermal_zone_device_update(tz->thermal_zone,
|
||||
THERMAL_EVENT_UNSPECIFIED);
|
||||
}
|
||||
|
||||
/* sys I/F for generic thermal sysfs support */
|
||||
|
||||
static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
|
||||
|
@ -900,6 +894,12 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz)
|
|||
Driver Interface
|
||||
-------------------------------------------------------------------------- */
|
||||
|
||||
static void acpi_queue_thermal_check(struct acpi_thermal *tz)
|
||||
{
|
||||
if (!work_pending(&tz->thermal_check_work))
|
||||
queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
|
||||
}
|
||||
|
||||
static void acpi_thermal_notify(struct acpi_device *device, u32 event)
|
||||
{
|
||||
struct acpi_thermal *tz = acpi_driver_data(device);
|
||||
|
@ -910,17 +910,17 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
|
|||
|
||||
switch (event) {
|
||||
case ACPI_THERMAL_NOTIFY_TEMPERATURE:
|
||||
acpi_thermal_check(tz);
|
||||
acpi_queue_thermal_check(tz);
|
||||
break;
|
||||
case ACPI_THERMAL_NOTIFY_THRESHOLDS:
|
||||
acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS);
|
||||
acpi_thermal_check(tz);
|
||||
acpi_queue_thermal_check(tz);
|
||||
acpi_bus_generate_netlink_event(device->pnp.device_class,
|
||||
dev_name(&device->dev), event, 0);
|
||||
break;
|
||||
case ACPI_THERMAL_NOTIFY_DEVICES:
|
||||
acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES);
|
||||
acpi_thermal_check(tz);
|
||||
acpi_queue_thermal_check(tz);
|
||||
acpi_bus_generate_netlink_event(device->pnp.device_class,
|
||||
dev_name(&device->dev), event, 0);
|
||||
break;
|
||||
|
@ -1020,7 +1020,25 @@ static void acpi_thermal_check_fn(struct work_struct *work)
|
|||
{
|
||||
struct acpi_thermal *tz = container_of(work, struct acpi_thermal,
|
||||
thermal_check_work);
|
||||
acpi_thermal_check(tz);
|
||||
|
||||
/*
|
||||
* In general, it is not sufficient to check the pending bit, because
|
||||
* subsequent instances of this function may be queued after one of them
|
||||
* has started running (e.g. if _TMP sleeps). Avoid bailing out if just
|
||||
* one of them is running, though, because it may have done the actual
|
||||
* check some time ago, so allow at least one of them to block on the
|
||||
* mutex while another one is running the update.
|
||||
*/
|
||||
if (!refcount_dec_not_one(&tz->thermal_check_count))
|
||||
return;
|
||||
|
||||
mutex_lock(&tz->thermal_check_lock);
|
||||
|
||||
thermal_zone_device_update(tz->thermal_zone, THERMAL_EVENT_UNSPECIFIED);
|
||||
|
||||
refcount_inc(&tz->thermal_check_count);
|
||||
|
||||
mutex_unlock(&tz->thermal_check_lock);
|
||||
}
|
||||
|
||||
static int acpi_thermal_add(struct acpi_device *device)
|
||||
|
@ -1052,6 +1070,8 @@ static int acpi_thermal_add(struct acpi_device *device)
|
|||
if (result)
|
||||
goto free_memory;
|
||||
|
||||
refcount_set(&tz->thermal_check_count, 3);
|
||||
mutex_init(&tz->thermal_check_lock);
|
||||
INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn);
|
||||
|
||||
pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device),
|
||||
|
@ -1117,7 +1137,7 @@ static int acpi_thermal_resume(struct device *dev)
|
|||
tz->state.active |= tz->trips.active[i].flags.enabled;
|
||||
}
|
||||
|
||||
queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
|
||||
acpi_queue_thermal_check(tz);
|
||||
|
||||
return AE_OK;
|
||||
}
|
||||
|
|
|
@ -1022,6 +1022,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
|
|||
if (!sock)
|
||||
return err;
|
||||
|
||||
/*
|
||||
* We need to make sure we don't get any errant requests while we're
|
||||
* reallocating the ->socks array.
|
||||
*/
|
||||
blk_mq_freeze_queue(nbd->disk->queue);
|
||||
|
||||
if (!netlink && !nbd->task_setup &&
|
||||
!test_bit(NBD_RT_BOUND, &config->runtime_flags))
|
||||
nbd->task_setup = current;
|
||||
|
@ -1060,10 +1066,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
|
|||
nsock->cookie = 0;
|
||||
socks[config->num_connections++] = nsock;
|
||||
atomic_inc(&config->live_connections);
|
||||
blk_mq_unfreeze_queue(nbd->disk->queue);
|
||||
|
||||
return 0;
|
||||
|
||||
put_socket:
|
||||
blk_mq_unfreeze_queue(nbd->disk->queue);
|
||||
sockfd_put(sock);
|
||||
return err;
|
||||
}
|
||||
|
|
|
@ -6,7 +6,10 @@
|
|||
#define CREATE_TRACE_POINTS
|
||||
#include "trace.h"
|
||||
|
||||
#define MB_TO_SECTS(mb) (((sector_t)mb * SZ_1M) >> SECTOR_SHIFT)
|
||||
static inline sector_t mb_to_sects(unsigned long mb)
|
||||
{
|
||||
return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
|
||||
}
|
||||
|
||||
static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
|
||||
{
|
||||
|
@ -77,12 +80,11 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
zone_capacity_sects = MB_TO_SECTS(dev->zone_capacity);
|
||||
dev_capacity_sects = MB_TO_SECTS(dev->size);
|
||||
dev->zone_size_sects = MB_TO_SECTS(dev->zone_size);
|
||||
dev->nr_zones = dev_capacity_sects >> ilog2(dev->zone_size_sects);
|
||||
if (dev_capacity_sects & (dev->zone_size_sects - 1))
|
||||
dev->nr_zones++;
|
||||
zone_capacity_sects = mb_to_sects(dev->zone_capacity);
|
||||
dev_capacity_sects = mb_to_sects(dev->size);
|
||||
dev->zone_size_sects = mb_to_sects(dev->zone_size);
|
||||
dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects)
|
||||
>> ilog2(dev->zone_size_sects);
|
||||
|
||||
dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone),
|
||||
GFP_KERNEL | __GFP_ZERO);
|
||||
|
|
|
@ -32,8 +32,8 @@ DCN30 = dcn30_init.o dcn30_hubbub.o dcn30_hubp.o dcn30_dpp.o dcn30_optc.o \
|
|||
|
||||
|
||||
ifdef CONFIG_X86
|
||||
CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -msse
|
||||
CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -msse
|
||||
CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -msse
|
||||
CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -msse
|
||||
endif
|
||||
|
||||
ifdef CONFIG_PPC64
|
||||
|
@ -45,6 +45,8 @@ ifdef CONFIG_CC_IS_GCC
|
|||
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
|
||||
IS_OLD_GCC = 1
|
||||
endif
|
||||
CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o += -mhard-float
|
||||
CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o += -mhard-float
|
||||
endif
|
||||
|
||||
ifdef CONFIG_X86
|
||||
|
|
|
@ -14,7 +14,7 @@ DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \
|
|||
dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o
|
||||
|
||||
ifdef CONFIG_X86
|
||||
CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mhard-float -msse
|
||||
CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -msse
|
||||
endif
|
||||
|
||||
ifdef CONFIG_PPC64
|
||||
|
@ -25,6 +25,7 @@ ifdef CONFIG_CC_IS_GCC
|
|||
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
|
||||
IS_OLD_GCC = 1
|
||||
endif
|
||||
CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o += -mhard-float
|
||||
endif
|
||||
|
||||
ifdef CONFIG_X86
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
DCN3_02 = dcn302_init.o dcn302_hwseq.o dcn302_resource.o
|
||||
|
||||
ifdef CONFIG_X86
|
||||
CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mhard-float -msse
|
||||
CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -msse
|
||||
endif
|
||||
|
||||
ifdef CONFIG_PPC64
|
||||
|
@ -24,6 +24,7 @@ ifdef CONFIG_CC_IS_GCC
|
|||
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
|
||||
IS_OLD_GCC = 1
|
||||
endif
|
||||
CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o += -mhard-float
|
||||
endif
|
||||
|
||||
ifdef CONFIG_X86
|
||||
|
|
|
@ -553,6 +553,7 @@ struct pptable_funcs {
|
|||
*clock_req);
|
||||
uint32_t (*get_fan_control_mode)(struct smu_context *smu);
|
||||
int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
|
||||
int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed);
|
||||
int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);
|
||||
int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate);
|
||||
int (*gfx_off_control)(struct smu_context *smu, bool enable);
|
||||
|
|
|
@ -203,6 +203,9 @@ int
|
|||
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
|
||||
uint32_t mode);
|
||||
|
||||
int
|
||||
smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed);
|
||||
|
||||
int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
|
||||
uint32_t speed);
|
||||
|
||||
|
|
|
@ -2151,19 +2151,14 @@ int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
|
|||
int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
|
||||
{
|
||||
int ret = 0;
|
||||
uint32_t rpm;
|
||||
|
||||
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
mutex_lock(&smu->mutex);
|
||||
|
||||
if (smu->ppt_funcs->set_fan_speed_rpm) {
|
||||
if (speed > 100)
|
||||
speed = 100;
|
||||
rpm = speed * smu->fan_max_rpm / 100;
|
||||
ret = smu->ppt_funcs->set_fan_speed_rpm(smu, rpm);
|
||||
}
|
||||
if (smu->ppt_funcs->set_fan_speed_percent)
|
||||
ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
|
||||
|
||||
mutex_unlock(&smu->mutex);
|
||||
|
||||
|
|
|
@ -2326,6 +2326,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
|
|||
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
|
||||
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
|
||||
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
|
||||
.set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
|
||||
.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
|
||||
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
|
||||
.gfx_off_control = smu_v11_0_gfx_off_control,
|
||||
|
|
|
@ -2456,6 +2456,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
|
|||
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
|
||||
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
|
||||
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
|
||||
.set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
|
||||
.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
|
||||
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
|
||||
.gfx_off_control = smu_v11_0_gfx_off_control,
|
||||
|
|
|
@ -2802,6 +2802,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
|
|||
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
|
||||
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
|
||||
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
|
||||
.set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
|
||||
.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
|
||||
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
|
||||
.gfx_off_control = smu_v11_0_gfx_off_control,
|
||||
|
|
|
@ -1173,6 +1173,35 @@ smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
|
|||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
|
||||
{
|
||||
struct amdgpu_device *adev = smu->adev;
|
||||
uint32_t duty100, duty;
|
||||
uint64_t tmp64;
|
||||
|
||||
if (speed > 100)
|
||||
speed = 100;
|
||||
|
||||
if (smu_v11_0_auto_fan_control(smu, 0))
|
||||
return -EINVAL;
|
||||
|
||||
duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
|
||||
CG_FDO_CTRL1, FMAX_DUTY100);
|
||||
if (!duty100)
|
||||
return -EINVAL;
|
||||
|
||||
tmp64 = (uint64_t)speed * duty100;
|
||||
do_div(tmp64, 100);
|
||||
duty = (uint32_t)tmp64;
|
||||
|
||||
WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
|
||||
REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
|
||||
CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
|
||||
|
||||
return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
|
||||
}
|
||||
|
||||
int
|
||||
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
|
||||
uint32_t mode)
|
||||
|
@ -1181,7 +1210,7 @@ smu_v11_0_set_fan_control_mode(struct smu_context *smu,
|
|||
|
||||
switch (mode) {
|
||||
case AMD_FAN_CTRL_NONE:
|
||||
ret = smu_v11_0_set_fan_speed_rpm(smu, smu->fan_max_rpm);
|
||||
ret = smu_v11_0_set_fan_speed_percent(smu, 100);
|
||||
break;
|
||||
case AMD_FAN_CTRL_MANUAL:
|
||||
ret = smu_v11_0_auto_fan_control(smu, 0);
|
||||
|
|
|
@@ -2755,12 +2755,11 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
u32 val;

ddi_translations = icl_get_mg_buf_trans(encoder, crtc_state, &n_entries);
/* The table does not have values for level 3 and level 9. */
if (level >= n_entries || level == 3 || level == 9) {
if (level >= n_entries) {
drm_dbg_kms(&dev_priv->drm,
"DDI translation not found for level %d. Using %d instead.",
level, n_entries - 2);
level = n_entries - 2;
level, n_entries - 1);
level = n_entries - 1;
}

/* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
@ -390,6 +390,16 @@ static void emit_batch(struct i915_vma * const vma,
|
|||
&cb_kernel_ivb,
|
||||
desc_count);
|
||||
|
||||
/* Reset inherited context registers */
|
||||
gen7_emit_pipeline_invalidate(&cmds);
|
||||
batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
|
||||
batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
|
||||
batch_add(&cmds, 0xffff0000);
|
||||
batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
|
||||
batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
|
||||
gen7_emit_pipeline_flush(&cmds);
|
||||
|
||||
/* Switch to the media pipeline and our base address */
|
||||
gen7_emit_pipeline_invalidate(&cmds);
|
||||
batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
|
||||
batch_add(&cmds, MI_NOOP);
|
||||
|
@ -399,9 +409,11 @@ static void emit_batch(struct i915_vma * const vma,
|
|||
gen7_emit_state_base_address(&cmds, descriptors);
|
||||
gen7_emit_pipeline_invalidate(&cmds);
|
||||
|
||||
/* Set the clear-residual kernel state */
|
||||
gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);
|
||||
gen7_emit_interface_descriptor_load(&cmds, descriptors, desc_count);
|
||||
|
||||
/* Execute the kernel on all HW threads */
|
||||
for (i = 0; i < num_primitives(bv); i++)
|
||||
gen7_emit_media_object(&cmds, i);
|
||||
|
||||
|
|
|
@ -526,16 +526,39 @@ static int init_ggtt(struct i915_ggtt *ggtt)
|
|||
|
||||
mutex_init(&ggtt->error_mutex);
|
||||
if (ggtt->mappable_end) {
|
||||
/* Reserve a mappable slot for our lockless error capture */
|
||||
ret = drm_mm_insert_node_in_range(&ggtt->vm.mm,
|
||||
&ggtt->error_capture,
|
||||
PAGE_SIZE, 0,
|
||||
I915_COLOR_UNEVICTABLE,
|
||||
0, ggtt->mappable_end,
|
||||
DRM_MM_INSERT_LOW);
|
||||
if (ret)
|
||||
return ret;
|
||||
/*
|
||||
* Reserve a mappable slot for our lockless error capture.
|
||||
*
|
||||
* We strongly prefer taking address 0x0 in order to protect
|
||||
* other critical buffers against accidental overwrites,
|
||||
* as writing to address 0 is a very common mistake.
|
||||
*
|
||||
* Since 0 may already be in use by the system (e.g. the BIOS
|
||||
* framebuffer), we let the reservation fail quietly and hope
|
||||
* 0 remains reserved always.
|
||||
*
|
||||
* If we fail to reserve 0, and then fail to find any space
|
||||
* for an error-capture, remain silent. We can afford not
|
||||
* to reserve an error_capture node as we have fallback
|
||||
* paths, and we trust that 0 will remain reserved. However,
|
||||
* the only likely reason for failure to insert is a driver
|
||||
* bug, which we expect to cause other failures...
|
||||
*/
|
||||
ggtt->error_capture.size = I915_GTT_PAGE_SIZE;
|
||||
ggtt->error_capture.color = I915_COLOR_UNEVICTABLE;
|
||||
if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture))
|
||||
drm_mm_insert_node_in_range(&ggtt->vm.mm,
|
||||
&ggtt->error_capture,
|
||||
ggtt->error_capture.size, 0,
|
||||
ggtt->error_capture.color,
|
||||
0, ggtt->mappable_end,
|
||||
DRM_MM_INSERT_LOW);
|
||||
}
|
||||
if (drm_mm_node_allocated(&ggtt->error_capture))
|
||||
drm_dbg(&ggtt->vm.i915->drm,
|
||||
"Reserved GGTT:[%llx, %llx] for use by error capture\n",
|
||||
ggtt->error_capture.start,
|
||||
ggtt->error_capture.start + ggtt->error_capture.size);
|
||||
|
||||
/*
|
||||
* The upper portion of the GuC address space has a sizeable hole
|
||||
|
@ -548,9 +571,9 @@ static int init_ggtt(struct i915_ggtt *ggtt)
|
|||
|
||||
/* Clear any non-preallocated blocks */
|
||||
drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
|
||||
drm_dbg_kms(&ggtt->vm.i915->drm,
|
||||
"clearing unused GTT space: [%lx, %lx]\n",
|
||||
hole_start, hole_end);
|
||||
drm_dbg(&ggtt->vm.i915->drm,
|
||||
"clearing unused GTT space: [%lx, %lx]\n",
|
||||
hole_start, hole_end);
|
||||
ggtt->vm.clear_range(&ggtt->vm, hole_start,
|
||||
hole_end - hole_start);
|
||||
}
|
||||
|
|
|
@ -631,24 +631,26 @@ static int flush_lazy_signals(struct i915_active *ref)
|
|||
|
||||
int __i915_active_wait(struct i915_active *ref, int state)
|
||||
{
|
||||
int err;
|
||||
|
||||
might_sleep();
|
||||
|
||||
if (!i915_active_acquire_if_busy(ref))
|
||||
return 0;
|
||||
|
||||
/* Any fence added after the wait begins will not be auto-signaled */
|
||||
err = flush_lazy_signals(ref);
|
||||
i915_active_release(ref);
|
||||
if (err)
|
||||
return err;
|
||||
if (i915_active_acquire_if_busy(ref)) {
|
||||
int err;
|
||||
|
||||
if (!i915_active_is_idle(ref) &&
|
||||
___wait_var_event(ref, i915_active_is_idle(ref),
|
||||
state, 0, 0, schedule()))
|
||||
return -EINTR;
|
||||
err = flush_lazy_signals(ref);
|
||||
i915_active_release(ref);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (___wait_var_event(ref, i915_active_is_idle(ref),
|
||||
state, 0, 0, schedule()))
|
||||
return -EINTR;
|
||||
}
|
||||
|
||||
/*
|
||||
* After the wait is complete, the caller may free the active.
|
||||
* We have to flush any concurrent retirement before returning.
|
||||
*/
|
||||
flush_work(&ref->work);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@@ -1346,7 +1346,7 @@ intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
{
const unsigned int pi = __platform_mask_index(info, p);

return info->platform_mask[pi] & INTEL_SUBPLATFORM_BITS;
return info->platform_mask[pi] & ((1 << INTEL_SUBPLATFORM_BITS) - 1);
}

static __always_inline bool
@@ -1880,7 +1880,7 @@ static int igt_cs_tlb(void *arg)
vma = i915_vma_instance(out, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_put_batch;
goto out_put_out;
}

err = i915_vma_pin(vma, 0, 0,
@ -88,7 +88,11 @@ base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
|
|||
NVVAL(NV507C, SET_CONVERSION, OFS, 0x64));
|
||||
} else {
|
||||
PUSH_MTHD(push, NV507C, SET_PROCESSING,
|
||||
NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, DISABLE));
|
||||
NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, DISABLE),
|
||||
|
||||
SET_CONVERSION,
|
||||
NVVAL(NV507C, SET_CONVERSION, GAIN, 0) |
|
||||
NVVAL(NV507C, SET_CONVERSION, OFS, 0));
|
||||
}
|
||||
|
||||
PUSH_MTHD(push, NV507C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8);
|
||||
|
|
|
@ -49,7 +49,11 @@ base827c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
|
|||
NVVAL(NV827C, SET_CONVERSION, OFS, 0x64));
|
||||
} else {
|
||||
PUSH_MTHD(push, NV827C, SET_PROCESSING,
|
||||
NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, DISABLE));
|
||||
NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, DISABLE),
|
||||
|
||||
SET_CONVERSION,
|
||||
NVVAL(NV827C, SET_CONVERSION, GAIN, 0) |
|
||||
NVVAL(NV827C, SET_CONVERSION, OFS, 0));
|
||||
}
|
||||
|
||||
PUSH_MTHD(push, NV827C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8,
|
||||
|
|
|
@ -2663,6 +2663,14 @@ nv50_display_create(struct drm_device *dev)
|
|||
else
|
||||
nouveau_display(dev)->format_modifiers = disp50xx_modifiers;
|
||||
|
||||
if (disp->disp->object.oclass >= GK104_DISP) {
|
||||
dev->mode_config.cursor_width = 256;
|
||||
dev->mode_config.cursor_height = 256;
|
||||
} else {
|
||||
dev->mode_config.cursor_width = 64;
|
||||
dev->mode_config.cursor_height = 64;
|
||||
}
|
||||
|
||||
/* create crtc objects to represent the hw heads */
|
||||
if (disp->disp->object.oclass >= GV100_DISP)
|
||||
crtcs = nvif_rd32(&device->object, 0x610060) & 0xff;
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
#include "head.h"
|
||||
#include "core.h"
|
||||
|
||||
#include "nvif/push.h"
|
||||
#include <nvif/push507c.h>
|
||||
|
||||
#include <nvhw/class/cl917d.h>
|
||||
|
@ -73,6 +74,31 @@ head917d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
head917d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
|
||||
{
|
||||
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
|
||||
const int i = head->base.index;
|
||||
int ret;
|
||||
|
||||
ret = PUSH_WAIT(push, 5);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
PUSH_MTHD(push, NV917D, HEAD_SET_CONTROL_CURSOR(i),
|
||||
NVDEF(NV917D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
|
||||
NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
|
||||
NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
|
||||
NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
|
||||
NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) |
|
||||
NVDEF(NV917D, HEAD_SET_CONTROL_CURSOR, COMPOSITION, ALPHA_BLEND),
|
||||
|
||||
HEAD_SET_OFFSET_CURSOR(i), asyh->curs.offset >> 8);
|
||||
|
||||
PUSH_MTHD(push, NV917D, HEAD_SET_CONTEXT_DMA_CURSOR(i), asyh->curs.handle);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
head917d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw,
|
||||
struct nv50_head_atom *asyh)
|
||||
|
@ -101,7 +127,7 @@ head917d = {
|
|||
.core_clr = head907d_core_clr,
|
||||
.curs_layout = head917d_curs_layout,
|
||||
.curs_format = head507d_curs_format,
|
||||
.curs_set = head907d_curs_set,
|
||||
.curs_set = head917d_curs_set,
|
||||
.curs_clr = head907d_curs_clr,
|
||||
.base = head917d_base,
|
||||
.ovly = head907d_ovly,
|
||||
|
|
|
@ -702,6 +702,11 @@ nv50_wndw_init(struct nv50_wndw *wndw)
|
|||
nvif_notify_get(&wndw->notify);
|
||||
}
|
||||
|
||||
static const u64 nv50_cursor_format_modifiers[] = {
|
||||
DRM_FORMAT_MOD_LINEAR,
|
||||
DRM_FORMAT_MOD_INVALID,
|
||||
};
|
||||
|
||||
int
|
||||
nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
|
||||
enum drm_plane_type type, const char *name, int index,
|
||||
|
@ -713,6 +718,7 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
|
|||
struct nvif_mmu *mmu = &drm->client.mmu;
|
||||
struct nv50_disp *disp = nv50_disp(dev);
|
||||
struct nv50_wndw *wndw;
|
||||
const u64 *format_modifiers;
|
||||
int nformat;
|
||||
int ret;
|
||||
|
||||
|
@ -728,10 +734,13 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
|
|||
|
||||
for (nformat = 0; format[nformat]; nformat++);
|
||||
|
||||
ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw,
|
||||
format, nformat,
|
||||
nouveau_display(dev)->format_modifiers,
|
||||
type, "%s-%d", name, index);
|
||||
if (type == DRM_PLANE_TYPE_CURSOR)
|
||||
format_modifiers = nv50_cursor_format_modifiers;
|
||||
else
|
||||
format_modifiers = nouveau_display(dev)->format_modifiers;
|
||||
|
||||
ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw, format, nformat,
|
||||
format_modifiers, type, "%s-%d", name, index);
|
||||
if (ret) {
|
||||
kfree(*pwndw);
|
||||
*pwndw = NULL;
|
||||
|
|
|
@ -66,6 +66,10 @@
|
|||
#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000)
|
||||
#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001)
|
||||
#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002)
|
||||
#define NV917D_HEAD_SET_OFFSET_CURSOR(a) (0x00000484 + (a)*0x00000300)
|
||||
#define NV917D_HEAD_SET_OFFSET_CURSOR_ORIGIN 31:0
|
||||
#define NV917D_HEAD_SET_CONTEXT_DMA_CURSOR(a) (0x0000048C + (a)*0x00000300)
|
||||
#define NV917D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE 31:0
|
||||
#define NV917D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300)
|
||||
#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0
|
||||
#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000)
|
||||
|
|
|
@ -123,131 +123,131 @@ PUSH_KICK(struct nvif_push *push)
|
|||
} while(0)
|
||||
#endif
|
||||
|
||||
#define PUSH_1(X,f,ds,n,c,o,p,s,mA,dA) do { \
|
||||
PUSH_##o##_HDR((p), s, mA, (c)+(n)); \
|
||||
PUSH_##f(X, (p), X##mA, 1, o, (dA), ds, ""); \
|
||||
#define PUSH_1(X,f,ds,n,o,p,s,mA,dA) do { \
|
||||
PUSH_##o##_HDR((p), s, mA, (ds)+(n)); \
|
||||
PUSH_##f(X, (p), X##mA, 1, o, (dA), ds, ""); \
|
||||
} while(0)
|
||||
#define PUSH_2(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
|
||||
PUSH_ASSERT((mB) - (mA) == (1?PUSH_##o##_INC), "mthd1"); \
|
||||
PUSH_1(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
|
||||
PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
|
||||
#define PUSH_2(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
|
||||
PUSH_ASSERT((mB) - (mA) == (1?PUSH_##o##_INC), "mthd1"); \
|
||||
PUSH_1(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
|
||||
PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
|
||||
} while(0)
|
||||
#define PUSH_3(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
|
||||
PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd2"); \
|
||||
PUSH_2(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
|
||||
PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
|
||||
#define PUSH_3(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
|
||||
PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd2"); \
|
||||
PUSH_2(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
|
||||
PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
|
||||
} while(0)
|
||||
#define PUSH_4(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
|
||||
PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd3"); \
|
||||
PUSH_3(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
|
||||
PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
|
||||
#define PUSH_4(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
|
||||
PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd3"); \
|
||||
PUSH_3(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
|
||||
PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
|
||||
} while(0)
|
||||
#define PUSH_5(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
|
||||
PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd4"); \
|
||||
PUSH_4(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
|
||||
PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
|
||||
#define PUSH_5(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
|
||||
PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd4"); \
|
||||
PUSH_4(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
|
||||
PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
|
||||
} while(0)
|
||||
#define PUSH_6(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
|
||||
PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd5"); \
|
||||
PUSH_5(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
|
||||
PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
|
||||
#define PUSH_6(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
|
||||
PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd5"); \
|
||||
PUSH_5(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
|
||||
PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
|
||||
} while(0)
|
||||
#define PUSH_7(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
|
||||
PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd6"); \
|
||||
PUSH_6(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
|
||||
PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
|
||||
#define PUSH_7(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
|
||||
PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd6"); \
|
||||
PUSH_6(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
|
||||
PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
|
||||
} while(0)
|
||||
#define PUSH_8(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
|
||||
PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd7"); \
|
||||
PUSH_7(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
|
||||
PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
|
||||
#define PUSH_8(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
|
||||
PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd7"); \
|
||||
PUSH_7(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
|
||||
PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
|
||||
} while(0)
|
||||
#define PUSH_9(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
|
||||
PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd8"); \
|
||||
PUSH_8(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
|
||||
PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
|
||||
#define PUSH_9(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
|
||||
PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd8"); \
|
||||
PUSH_8(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
|
||||
PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
|
||||
} while(0)
|
||||
#define PUSH_10(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
|
||||
PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd9"); \
|
||||
PUSH_9(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
|
||||
PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
|
||||
#define PUSH_10(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
|
||||
PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd9"); \
|
||||
PUSH_9(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
|
||||
PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
|
||||
} while(0)
|
||||
|
||||
#define PUSH_1D(X,o,p,s,mA,dA) \
|
||||
PUSH_1(X, DATA_, 1, 1, 0, o, (p), s, X##mA, (dA))
|
||||
#define PUSH_2D(X,o,p,s,mA,dA,mB,dB) \
|
||||
PUSH_2(X, DATA_, 1, 1, 0, o, (p), s, X##mB, (dB), \
|
||||
X##mA, (dA))
|
||||
#define PUSH_3D(X,o,p,s,mA,dA,mB,dB,mC,dC) \
|
||||
PUSH_3(X, DATA_, 1, 1, 0, o, (p), s, X##mC, (dC), \
|
||||
X##mB, (dB), \
|
||||
X##mA, (dA))
|
||||
#define PUSH_4D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD) \
|
||||
PUSH_4(X, DATA_, 1, 1, 0, o, (p), s, X##mD, (dD), \
|
||||
X##mC, (dC), \
|
||||
X##mB, (dB), \
|
||||
X##mA, (dA))
|
||||
#define PUSH_5D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE) \
|
||||
PUSH_5(X, DATA_, 1, 1, 0, o, (p), s, X##mE, (dE), \
|
||||
X##mD, (dD), \
|
||||
X##mC, (dC), \
|
||||
X##mB, (dB), \
|
||||
X##mA, (dA))
|
||||
#define PUSH_1D(X,o,p,s,mA,dA) \
|
||||
PUSH_1(X, DATA_, 1, 0, o, (p), s, X##mA, (dA))
|
||||
#define PUSH_2D(X,o,p,s,mA,dA,mB,dB) \
|
||||
PUSH_2(X, DATA_, 1, 0, o, (p), s, X##mB, (dB), \
|
||||
X##mA, (dA))
|
||||
#define PUSH_3D(X,o,p,s,mA,dA,mB,dB,mC,dC) \
|
||||
PUSH_3(X, DATA_, 1, 0, o, (p), s, X##mC, (dC), \
|
||||
X##mB, (dB), \
|
||||
X##mA, (dA))
|
||||
#define PUSH_4D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD) \
|
||||
PUSH_4(X, DATA_, 1, 0, o, (p), s, X##mD, (dD), \
|
||||
X##mC, (dC), \
|
||||
X##mB, (dB), \
|
||||
X##mA, (dA))
|
||||
#define PUSH_5D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE) \
|
||||
PUSH_5(X, DATA_, 1, 0, o, (p), s, X##mE, (dE), \
|
||||
X##mD, (dD), \
|
||||
X##mC, (dC), \
|
||||
X##mB, (dB), \
|
||||
X##mA, (dA))
|
||||
#define PUSH_6D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF) \
|
||||
PUSH_6(X, DATA_, 1, 1, 0, o, (p), s, X##mF, (dF), \
|
||||
X##mE, (dE), \
|
||||
X##mD, (dD), \
|
||||
X##mC, (dC), \
|
||||
X##mB, (dB), \
|
||||
X##mA, (dA))
|
||||
PUSH_6(X, DATA_, 1, 0, o, (p), s, X##mF, (dF), \
|
||||
X##mE, (dE), \
|
||||
X##mD, (dD), \
|
||||
X##mC, (dC), \
|
||||
X##mB, (dB), \
|
||||
X##mA, (dA))
|
||||
#define PUSH_7D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG) \
|
||||
PUSH_7(X, DATA_, 1, 1, 0, o, (p), s, X##mG, (dG), \
|
||||
X##mF, (dF), \
|
||||
X##mE, (dE), \
|
||||
X##mD, (dD), \
|
||||
X##mC, (dC), \
|
||||
X##mB, (dB), \
|
||||
X##mA, (dA))
|
||||
PUSH_7(X, DATA_, 1, 0, o, (p), s, X##mG, (dG), \
|
||||
X##mF, (dF), \
|
||||
X##mE, (dE), \
|
||||
X##mD, (dD), \
|
||||
X##mC, (dC), \
|
||||
X##mB, (dB), \
|
||||
X##mA, (dA))
|
||||
#define PUSH_8D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH) \
|
||||
PUSH_8(X, DATA_, 1, 1, 0, o, (p), s, X##mH, (dH), \
|
||||
X##mG, (dG), \
|
||||
X##mF, (dF), \
|
||||
X##mE, (dE), \
|
||||
X##mD, (dD), \
|
||||
X##mC, (dC), \
|
||||
X##mB, (dB), \
|
||||
X##mA, (dA))
|
||||
PUSH_8(X, DATA_, 1, 0, o, (p), s, X##mH, (dH), \
|
||||
X##mG, (dG), \
|
||||
X##mF, (dF), \
|
||||
X##mE, (dE), \
|
||||
X##mD, (dD), \
|
||||
X##mC, (dC), \
|
||||
X##mB, (dB), \
|
||||
X##mA, (dA))
|
||||
#define PUSH_9D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI) \
|
||||
PUSH_9(X, DATA_, 1, 1, 0, o, (p), s, X##mI, (dI), \
|
||||
X##mH, (dH), \
|
||||
X##mG, (dG), \
|
||||
X##mF, (dF), \
|
||||
X##mE, (dE), \
|
||||
X##mD, (dD), \
|
||||
X##mC, (dC), \
|
||||
X##mB, (dB), \
|
||||
X##mA, (dA))
|
||||
PUSH_9(X, DATA_, 1, 0, o, (p), s, X##mI, (dI), \
|
||||
X##mH, (dH), \
|
||||
X##mG, (dG), \
|
||||
X##mF, (dF), \
|
||||
X##mE, (dE), \
|
||||
X##mD, (dD), \
|
||||
X##mC, (dC), \
|
||||
X##mB, (dB), \
|
||||
X##mA, (dA))
|
||||
#define PUSH_10D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI,mJ,dJ) \
|
||||
PUSH_10(X, DATA_, 1, 1, 0, o, (p), s, X##mJ, (dJ), \
|
||||
X##mI, (dI), \
|
||||
X##mH, (dH), \
|
||||
X##mG, (dG), \
|
||||
X##mF, (dF), \
|
||||
X##mE, (dE), \
|
||||
X##mD, (dD), \
|
||||
X##mC, (dC), \
|
||||
X##mB, (dB), \
|
||||
X##mA, (dA))
|
||||
PUSH_10(X, DATA_, 1, 0, o, (p), s, X##mJ, (dJ), \
|
||||
X##mI, (dI), \
|
||||
X##mH, (dH), \
|
||||
X##mG, (dG), \
|
||||
X##mF, (dF), \
|
||||
X##mE, (dE), \
|
||||
X##mD, (dD), \
|
||||
X##mC, (dC), \
|
||||
X##mB, (dB), \
|
||||
X##mA, (dA))
|
||||
|
||||
#define PUSH_1P(X,o,p,s,mA,dp,ds) \
|
||||
PUSH_1(X, DATAp, ds, ds, 0, o, (p), s, X##mA, (dp))
|
||||
#define PUSH_2P(X,o,p,s,mA,dA,mB,dp,ds) \
|
||||
PUSH_2(X, DATAp, ds, ds, 0, o, (p), s, X##mB, (dp), \
|
||||
X##mA, (dA))
|
||||
#define PUSH_3P(X,o,p,s,mA,dA,mB,dB,mC,dp,ds) \
|
||||
PUSH_3(X, DATAp, ds, ds, 0, o, (p), s, X##mC, (dp), \
|
||||
X##mB, (dB), \
|
||||
X##mA, (dA))
|
||||
#define PUSH_1P(X,o,p,s,mA,dp,ds) \
|
||||
PUSH_1(X, DATAp, ds, 0, o, (p), s, X##mA, (dp))
|
||||
#define PUSH_2P(X,o,p,s,mA,dA,mB,dp,ds) \
|
||||
PUSH_2(X, DATAp, ds, 0, o, (p), s, X##mB, (dp), \
|
||||
X##mA, (dA))
|
||||
#define PUSH_3P(X,o,p,s,mA,dA,mB,dB,mC,dp,ds) \
|
||||
PUSH_3(X, DATAp, ds, 0, o, (p), s, X##mC, (dp), \
|
||||
X##mB, (dB), \
|
||||
X##mA, (dA))
|
||||
|
||||
#define PUSH_(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,IMPL,...) IMPL
|
||||
#define PUSH(A...) PUSH_(A, PUSH_10P, PUSH_10D, \
|
||||
|
|
|
@@ -315,6 +315,10 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
struct drm_nouveau_svm_init *args = data;
int ret;

/* We need to fail if svm is disabled */
if (!cli->drm->svm)
return -ENOSYS;

/* Allocate tracking for SVM-enabled VMM. */
if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL)))
return -ENOMEM;
@@ -620,11 +620,11 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
* for now we just allocate globally.
*/
if (!hvs->hvs5)
/* 96kB */
drm_mm_init(&hvs->lbm_mm, 0, 96 * 1024);
/* 48k words of 2x12-bit pixels */
drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024);
else
/* 70k words */
drm_mm_init(&hvs->lbm_mm, 0, 70 * 2 * 1024);
/* 60k words of 4x12-bit pixels */
drm_mm_init(&hvs->lbm_mm, 0, 60 * 1024);

/* Upload filter kernels. We only have the one for now, so we
* keep it around for the lifetime of the driver.
@ -437,6 +437,7 @@ static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
|
|||
static u32 vc4_lbm_size(struct drm_plane_state *state)
|
||||
{
|
||||
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
|
||||
struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
|
||||
u32 pix_per_line;
|
||||
u32 lbm;
|
||||
|
||||
|
@ -472,7 +473,11 @@ static u32 vc4_lbm_size(struct drm_plane_state *state)
|
|||
lbm = pix_per_line * 16;
|
||||
}
|
||||
|
||||
lbm = roundup(lbm, 32);
|
||||
/* Align it to 64 or 128 (hvs5) bytes */
|
||||
lbm = roundup(lbm, vc4->hvs->hvs5 ? 128 : 64);
|
||||
|
||||
/* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */
|
||||
lbm /= vc4->hvs->hvs5 ? 4 : 2;
|
||||
|
||||
return lbm;
|
||||
}
|
||||
|
@ -912,9 +917,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
|
|||
if (!vc4_state->is_unity) {
|
||||
vc4_dlist_write(vc4_state,
|
||||
VC4_SET_FIELD(vc4_state->crtc_w,
|
||||
SCALER_POS1_SCL_WIDTH) |
|
||||
SCALER5_POS1_SCL_WIDTH) |
|
||||
VC4_SET_FIELD(vc4_state->crtc_h,
|
||||
SCALER_POS1_SCL_HEIGHT));
|
||||
SCALER5_POS1_SCL_HEIGHT));
|
||||
}
|
||||
|
||||
/* Position Word 2: Source Image Size */
|
||||
|
|
|
@@ -84,12 +84,9 @@ static inline bool is_rd890_iommu(struct pci_dev *pdev)
(pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
}

static inline bool iommu_feature(struct amd_iommu *iommu, u64 f)
static inline bool iommu_feature(struct amd_iommu *iommu, u64 mask)
{
if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
return false;

return !!(iommu->features & f);
return !!(iommu->features & mask);
}

static inline u64 iommu_virt_to_phys(void *vaddr)
@@ -387,6 +387,10 @@
#define IOMMU_CAP_NPCACHE 26
#define IOMMU_CAP_EFR 27

/* IOMMU IVINFO */
#define IOMMU_IVINFO_OFFSET 36
#define IOMMU_IVINFO_EFRSUP BIT(0)

/* IOMMU Feature Reporting Field (for IVHD type 10h */
#define IOMMU_FEAT_GASUP_SHIFT 6
@ -257,6 +257,8 @@ static void init_device_table_dma(void);
|
|||
|
||||
static bool amd_iommu_pre_enabled = true;
|
||||
|
||||
static u32 amd_iommu_ivinfo __initdata;
|
||||
|
||||
bool translation_pre_enabled(struct amd_iommu *iommu)
|
||||
{
|
||||
return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
|
||||
|
@ -296,6 +298,18 @@ int amd_iommu_get_num_iommus(void)
|
|||
return amd_iommus_present;
|
||||
}
|
||||
|
||||
/*
|
||||
* For IVHD type 0x11/0x40, EFR is also available via IVHD.
|
||||
* Default to IVHD EFR since it is available sooner
|
||||
* (i.e. before PCI init).
|
||||
*/
|
||||
static void __init early_iommu_features_init(struct amd_iommu *iommu,
|
||||
struct ivhd_header *h)
|
||||
{
|
||||
if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP)
|
||||
iommu->features = h->efr_reg;
|
||||
}
|
||||
|
||||
/* Access to l1 and l2 indexed register spaces */
|
||||
|
||||
static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
|
||||
|
@ -1577,6 +1591,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
|
|||
|
||||
if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT))
|
||||
amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
|
||||
|
||||
early_iommu_features_init(iommu, h);
|
||||
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
|
@ -1770,6 +1787,35 @@ static const struct attribute_group *amd_iommu_groups[] = {
|
|||
NULL,
|
||||
};
|
||||
|
||||
/*
|
||||
* Note: IVHD 0x11 and 0x40 also contains exact copy
|
||||
* of the IOMMU Extended Feature Register [MMIO Offset 0030h].
|
||||
* Default to EFR in IVHD since it is available sooner (i.e. before PCI init).
|
||||
*/
|
||||
static void __init late_iommu_features_init(struct amd_iommu *iommu)
|
||||
{
|
||||
u64 features;
|
||||
|
||||
if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
|
||||
return;
|
||||
|
||||
/* read extended feature bits */
|
||||
features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
|
||||
|
||||
if (!iommu->features) {
|
||||
iommu->features = features;
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Sanity check and warn if EFR values from
|
||||
* IVHD and MMIO conflict.
|
||||
*/
|
||||
if (features != iommu->features)
|
||||
pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx\n).",
|
||||
features, iommu->features);
|
||||
}
|
||||
|
||||
static int __init iommu_init_pci(struct amd_iommu *iommu)
|
||||
{
|
||||
int cap_ptr = iommu->cap_ptr;
|
||||
|
@ -1789,8 +1835,7 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
|
|||
if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
|
||||
amd_iommu_iotlb_sup = false;
|
||||
|
||||
/* read extended feature bits */
|
||||
iommu->features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
|
||||
late_iommu_features_init(iommu);
|
||||
|
||||
if (iommu_feature(iommu, FEATURE_GT)) {
|
||||
int glxval;
|
||||
|
@ -2607,6 +2652,11 @@ static void __init free_dma_resources(void)
|
|||
free_unity_maps();
|
||||
}
|
||||
|
||||
static void __init ivinfo_init(void *ivrs)
|
||||
{
|
||||
amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET));
|
||||
}
|
||||
|
||||
/*
|
||||
* This is the hardware init function for AMD IOMMU in the system.
|
||||
* This function is called either from amd_iommu_init or from the interrupt
|
||||
|
@ -2661,6 +2711,8 @@ static int __init early_amd_iommu_init(void)
|
|||
if (ret)
|
||||
goto out;
|
||||
|
||||
ivinfo_init(ivrs_base);
|
||||
|
||||
amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
|
||||
DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
|
||||
|
||||
|
|
|
@@ -1496,7 +1496,7 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
* Max Invs Pending (MIP) is set to 0 for now until we have DIT in
* ECAP.
*/
if (addr & GENMASK_ULL(size_order + VTD_PAGE_SHIFT, 0))
if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order))
pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
addr, size_order);
@ -5440,6 +5440,36 @@ intel_iommu_domain_set_attr(struct iommu_domain *domain,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static bool domain_use_flush_queue(void)
|
||||
{
|
||||
struct dmar_drhd_unit *drhd;
|
||||
struct intel_iommu *iommu;
|
||||
bool r = true;
|
||||
|
||||
if (intel_iommu_strict)
|
||||
return false;
|
||||
|
||||
/*
|
||||
* The flush queue implementation does not perform page-selective
|
||||
* invalidations that are required for efficient TLB flushes in virtual
|
||||
* environments. The benefit of batching is likely to be much lower than
|
||||
* the overhead of synchronizing the virtual and physical IOMMU
|
||||
* page-tables.
|
||||
*/
|
||||
rcu_read_lock();
|
||||
for_each_active_iommu(iommu, drhd) {
|
||||
if (!cap_caching_mode(iommu->cap))
|
||||
continue;
|
||||
|
||||
pr_warn_once("IOMMU batching is disabled due to virtualization");
|
||||
r = false;
|
||||
break;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
static int
|
||||
intel_iommu_domain_get_attr(struct iommu_domain *domain,
|
||||
enum iommu_attr attr, void *data)
|
||||
|
@ -5450,7 +5480,7 @@ intel_iommu_domain_get_attr(struct iommu_domain *domain,
|
|||
case IOMMU_DOMAIN_DMA:
|
||||
switch (attr) {
|
||||
case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
|
||||
*(int *)data = !intel_iommu_strict;
|
||||
*(int *)data = domain_use_flush_queue();
|
||||
return 0;
|
||||
default:
|
||||
return -ENODEV;
|
||||
|
|
|
@ -33,6 +33,8 @@
|
|||
#define BCH_FEATURE_COMPAT_FUNCS(name, flagname) \
|
||||
static inline int bch_has_feature_##name(struct cache_sb *sb) \
|
||||
{ \
|
||||
if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \
|
||||
return 0; \
|
||||
return (((sb)->feature_compat & \
|
||||
BCH##_FEATURE_COMPAT_##flagname) != 0); \
|
||||
} \
|
||||
|
@ -50,6 +52,8 @@ static inline void bch_clear_feature_##name(struct cache_sb *sb) \
|
|||
#define BCH_FEATURE_RO_COMPAT_FUNCS(name, flagname) \
|
||||
static inline int bch_has_feature_##name(struct cache_sb *sb) \
|
||||
{ \
|
||||
if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \
|
||||
return 0; \
|
||||
return (((sb)->feature_ro_compat & \
|
||||
BCH##_FEATURE_RO_COMPAT_##flagname) != 0); \
|
||||
} \
|
||||
|
@ -67,6 +71,8 @@ static inline void bch_clear_feature_##name(struct cache_sb *sb) \
|
|||
#define BCH_FEATURE_INCOMPAT_FUNCS(name, flagname) \
|
||||
static inline int bch_has_feature_##name(struct cache_sb *sb) \
|
||||
{ \
|
||||
if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \
|
||||
return 0; \
|
||||
return (((sb)->feature_incompat & \
|
||||
BCH##_FEATURE_INCOMPAT_##flagname) != 0); \
|
||||
} \
|
||||
|
|
|
@@ -3829,7 +3829,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
}
}

list_add_tail(&ns->siblings, &head->list);
list_add_tail_rcu(&ns->siblings, &head->list);
ns->head = head;
mutex_unlock(&ctrl->subsys->lock);
return 0;
@@ -221,7 +221,7 @@ static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
}

for (ns = nvme_next_ns(head, old);
ns != old;
ns && ns != old;
ns = nvme_next_ns(head, ns)) {
if (nvme_path_is_disabled(ns))
continue;
@@ -3257,6 +3257,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x1d97, 0x2263), /* SPCC */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
.driver_data = NVME_QUIRK_SINGLE_VECTOR },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
@ -1874,18 +1874,26 @@ void dasd_path_create_kobjects(struct dasd_device *device)
|
|||
}
|
||||
EXPORT_SYMBOL(dasd_path_create_kobjects);
|
||||
|
||||
/*
|
||||
* As we keep kobjects for the lifetime of a device, this function must not be
|
||||
* called anywhere but in the context of offlining a device.
|
||||
*/
|
||||
void dasd_path_remove_kobj(struct dasd_device *device, int chp)
|
||||
static void dasd_path_remove_kobj(struct dasd_device *device, int chp)
|
||||
{
|
||||
if (device->path[chp].in_sysfs) {
|
||||
kobject_put(&device->path[chp].kobj);
|
||||
device->path[chp].in_sysfs = false;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(dasd_path_remove_kobj);
|
||||
|
||||
/*
|
||||
* As we keep kobjects for the lifetime of a device, this function must not be
|
||||
* called anywhere but in the context of offlining a device.
|
||||
*/
|
||||
void dasd_path_remove_kobjects(struct dasd_device *device)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 8; i++)
|
||||
dasd_path_remove_kobj(device, i);
|
||||
}
|
||||
EXPORT_SYMBOL(dasd_path_remove_kobjects);
|
||||
|
||||
int dasd_add_sysfs_files(struct ccw_device *cdev)
|
||||
{
|
||||
|
|
|
@ -1036,7 +1036,6 @@ static void dasd_eckd_clear_conf_data(struct dasd_device *device)
|
|||
device->path[i].ssid = 0;
|
||||
device->path[i].chpid = 0;
|
||||
dasd_path_notoper(device, i);
|
||||
dasd_path_remove_kobj(device, i);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2173,6 +2172,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
|
|||
device->block = NULL;
|
||||
out_err1:
|
||||
dasd_eckd_clear_conf_data(device);
|
||||
dasd_path_remove_kobjects(device);
|
||||
kfree(device->private);
|
||||
device->private = NULL;
|
||||
return rc;
|
||||
|
@ -2191,6 +2191,7 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
|
|||
private->vdsneq = NULL;
|
||||
private->gneq = NULL;
|
||||
dasd_eckd_clear_conf_data(device);
|
||||
dasd_path_remove_kobjects(device);
|
||||
}
|
||||
|
||||
static struct dasd_ccw_req *
|
||||
|
|
|
@ -858,7 +858,7 @@ int dasd_add_sysfs_files(struct ccw_device *);
|
|||
void dasd_remove_sysfs_files(struct ccw_device *);
|
||||
void dasd_path_create_kobj(struct dasd_device *, int);
|
||||
void dasd_path_create_kobjects(struct dasd_device *);
|
||||
void dasd_path_remove_kobj(struct dasd_device *, int);
|
||||
void dasd_path_remove_kobjects(struct dasd_device *);
|
||||
|
||||
struct dasd_device *dasd_device_from_cdev(struct ccw_device *);
|
||||
struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *);
|
||||
|
|
|
@ -1026,9 +1026,8 @@ void tty_write_message(struct tty_struct *tty, char *msg)
|
|||
* write method will not be invoked in parallel for each device.
|
||||
*/
|
||||
|
||||
static ssize_t tty_write(struct kiocb *iocb, struct iov_iter *from)
|
||||
static ssize_t file_tty_write(struct file *file, struct kiocb *iocb, struct iov_iter *from)
|
||||
{
|
||||
struct file *file = iocb->ki_filp;
|
||||
struct tty_struct *tty = file_tty(file);
|
||||
struct tty_ldisc *ld;
|
||||
ssize_t ret;
|
||||
|
@ -1051,6 +1050,11 @@ static ssize_t tty_write(struct kiocb *iocb, struct iov_iter *from)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t tty_write(struct kiocb *iocb, struct iov_iter *from)
|
||||
{
|
||||
return file_tty_write(iocb->ki_filp, iocb, from);
|
||||
}
|
||||
|
||||
ssize_t redirected_tty_write(struct kiocb *iocb, struct iov_iter *iter)
|
||||
{
|
||||
struct file *p = NULL;
|
||||
|
@ -1060,9 +1064,13 @@ ssize_t redirected_tty_write(struct kiocb *iocb, struct iov_iter *iter)
|
|||
p = get_file(redirect);
|
||||
spin_unlock(&redirect_lock);
|
||||
|
||||
/*
|
||||
* We know the redirected tty is just another tty, we can can
|
||||
* call file_tty_write() directly with that file pointer.
|
||||
*/
|
||||
if (p) {
|
||||
ssize_t res;
|
||||
res = vfs_iocb_iter_write(p, iocb, iter);
|
||||
res = file_tty_write(p, iocb, iter);
|
||||
fput(p);
|
||||
return res;
|
||||
}
|
||||
|
@ -2308,6 +2316,12 @@ static int tioccons(struct file *file)
|
|||
fput(f);
|
||||
return 0;
|
||||
}
|
||||
if (file->f_op->write_iter != tty_write)
|
||||
return -ENOTTY;
|
||||
if (!(file->f_mode & FMODE_WRITE))
|
||||
return -EBADF;
|
||||
if (!(file->f_mode & FMODE_CAN_WRITE))
|
||||
return -EINVAL;
|
||||
spin_lock(&redirect_lock);
|
||||
if (redirect) {
|
||||
spin_unlock(&redirect_lock);
|
||||
|
|
|
@@ -130,7 +130,15 @@ EXPORT_SYMBOL(truncate_bdev_range);

static void set_init_blocksize(struct block_device *bdev)
{
	bdev->bd_inode->i_blkbits = blksize_bits(bdev_logical_block_size(bdev));
	unsigned int bsize = bdev_logical_block_size(bdev);
	loff_t size = i_size_read(bdev->bd_inode);

	while (bsize < PAGE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
	}
	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}

int set_blocksize(struct block_device *bdev, int size)
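The new set_init_blocksize() picks the largest power-of-two block size, capped at the page size, that still divides the device size evenly, instead of always using the logical block size. A small stand-alone sketch of that calculation is below; PAGE_SIZE is hard-coded to 4096 here purely for illustration.

#include <stdio.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096u

/*
 * Keep doubling the block size while the device size is still a multiple of
 * the doubled size and we stay at or below the page size - the same loop the
 * patch adds to set_init_blocksize().
 */
static unsigned int pick_init_blocksize(unsigned int logical_bsize, uint64_t dev_size)
{
	unsigned int bsize = logical_bsize;

	while (bsize < SKETCH_PAGE_SIZE) {
		if (dev_size & bsize)	/* doubling would no longer divide the size */
			break;
		bsize <<= 1;
	}
	return bsize;
}

int main(void)
{
	/* 1 MiB device: every power of two up to the page size divides it. */
	printf("%u\n", pick_init_blocksize(512, 1024 * 1024));	/* prints 4096 */
	/* 7 sectors: 512 is the best we can do. */
	printf("%u\n", pick_init_blocksize(512, 7 * 512));	/* prints 512 */
	return 0;
}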
@@ -673,7 +673,15 @@ static noinline void caching_thread(struct btrfs_work *work)
		wake_up(&caching_ctl->wait);
	}

	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
	/*
	 * If we are in the transaction that populated the free space tree we
	 * can't actually cache from the free space tree as our commit root and
	 * real root are the same, so we could change the contents of the blocks
	 * while caching.  Instead do the slow caching in this case, and after
	 * the transaction has committed we will be safe.
	 */
	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
	    !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags)))
		ret = load_free_space_tree(caching_ctl);
	else
		ret = load_extent_tree_free(caching_ctl);
@@ -563,6 +563,9 @@ enum {

	/* Indicate that we need to cleanup space cache v1 */
	BTRFS_FS_CLEANUP_SPACE_CACHE_V1,

	/* Indicate that we can't trust the free space tree for caching yet */
	BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED,
};

/*
@@ -2602,8 +2602,6 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
	struct btrfs_block_group *cache;
	int ret;

	btrfs_add_excluded_extent(trans->fs_info, bytenr, num_bytes);

	cache = btrfs_lookup_block_group(trans->fs_info, bytenr);
	if (!cache)
		return -EINVAL;

@@ -2615,11 +2613,19 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
	 * the pinned extents.
	 */
	btrfs_cache_block_group(cache, 1);
	/*
	 * Make sure we wait until the cache is completely built in case it is
	 * missing or is invalid and therefore needs to be rebuilt.
	 */
	ret = btrfs_wait_block_group_cache_done(cache);
	if (ret)
		goto out;

	pin_down_extent(trans, cache, bytenr, num_bytes, 0);

	/* remove us from the free space cache (if we're there at all) */
	ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
out:
	btrfs_put_block_group(cache);
	return ret;
}

@@ -2629,50 +2635,22 @@ static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
{
	int ret;
	struct btrfs_block_group *block_group;
	struct btrfs_caching_control *caching_ctl;

	block_group = btrfs_lookup_block_group(fs_info, start);
	if (!block_group)
		return -EINVAL;

	btrfs_cache_block_group(block_group, 0);
	caching_ctl = btrfs_get_caching_control(block_group);
	btrfs_cache_block_group(block_group, 1);
	/*
	 * Make sure we wait until the cache is completely built in case it is
	 * missing or is invalid and therefore needs to be rebuilt.
	 */
	ret = btrfs_wait_block_group_cache_done(block_group);
	if (ret)
		goto out;

	if (!caching_ctl) {
		/* Logic error */
		BUG_ON(!btrfs_block_group_done(block_group));
		ret = btrfs_remove_free_space(block_group, start, num_bytes);
	} else {
		/*
		 * We must wait for v1 caching to finish, otherwise we may not
		 * remove our space.
		 */
		btrfs_wait_space_cache_v1_finished(block_group, caching_ctl);
		mutex_lock(&caching_ctl->mutex);

		if (start >= caching_ctl->progress) {
			ret = btrfs_add_excluded_extent(fs_info, start,
							num_bytes);
		} else if (start + num_bytes <= caching_ctl->progress) {
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
		} else {
			num_bytes = caching_ctl->progress - start;
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
			if (ret)
				goto out_lock;

			num_bytes = (start + num_bytes) -
				caching_ctl->progress;
			start = caching_ctl->progress;
			ret = btrfs_add_excluded_extent(fs_info, start,
							num_bytes);
		}
out_lock:
		mutex_unlock(&caching_ctl->mutex);
		btrfs_put_caching_control(caching_ctl);
	}
	ret = btrfs_remove_free_space(block_group, start, num_bytes);
out:
	btrfs_put_block_group(block_group);
	return ret;
}

@@ -2863,9 +2841,6 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			break;
		}
		if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
			clear_extent_bits(&fs_info->excluded_extents, start,
					  end, EXTENT_UPTODATE);

		if (btrfs_test_opt(fs_info, DISCARD_SYNC))
			ret = btrfs_discard_extent(fs_info, start,
@@ -1150,6 +1150,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
		return PTR_ERR(trans);

	set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
	set_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
	free_space_root = btrfs_create_tree(trans,
					    BTRFS_FREE_SPACE_TREE_OBJECTID);
	if (IS_ERR(free_space_root)) {

@@ -1171,11 +1172,18 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
	btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE);
	btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
	ret = btrfs_commit_transaction(trans);

	return btrfs_commit_transaction(trans);
	/*
	 * Now that we've committed the transaction any reading of our commit
	 * root will be safe, so we can cache from the free space tree now.
	 */
	clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
	return ret;

abort:
	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
	clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
	btrfs_abort_transaction(trans, ret);
	btrfs_end_transaction(trans);
	return ret;
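Taken together, the btrfs hunks above gate free-space-tree caching on an "untrusted" flag that is set before the tree is populated and only cleared after the transaction that built it has committed. A rough user-space sketch of that protocol, using C11 atomics instead of the kernel's fs_info flag bits, is shown below; the flag names and the create/cache functions are simplified stand-ins for illustration only.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the FREE_SPACE_TREE compat_ro flag and the new UNTRUSTED bit. */
static atomic_bool has_free_space_tree = false;
static atomic_bool free_space_tree_untrusted = false;

/* Mirrors the gating added to caching_thread(). */
static void cache_block_group(void)
{
	if (atomic_load(&has_free_space_tree) &&
	    !atomic_load(&free_space_tree_untrusted))
		printf("fast path: load from free space tree\n");
	else
		printf("slow path: scan the extent tree\n");
}

/* Mirrors btrfs_create_free_space_tree(): untrusted until the commit lands. */
static void create_free_space_tree(void)
{
	atomic_store(&free_space_tree_untrusted, true);
	printf("populating free space tree in this transaction\n");
	atomic_store(&has_free_space_tree, true);
	printf("committing transaction\n");
	atomic_store(&free_space_tree_untrusted, false);
}

int main(void)
{
	cache_block_group();		/* slow path: tree does not exist yet    */
	create_free_space_tree();
	cache_block_group();		/* fast path: tree committed and trusted */
	return 0;
}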
@@ -433,7 +433,7 @@ static struct btrfs_device *__alloc_device(struct btrfs_fs_info *fs_info)

	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev, fs_info);
	btrfs_device_data_ordered_init(dev);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	extent_io_tree_init(fs_info, &dev->alloc_state,
@@ -39,10 +39,10 @@ struct btrfs_io_geometry {
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
#include <linux/seqlock.h>
#define __BTRFS_NEED_DEVICE_DATA_ORDERED
#define btrfs_device_data_ordered_init(device, info) \
	seqcount_mutex_init(&device->data_seqcount, &info->chunk_mutex)
#define btrfs_device_data_ordered_init(device) \
	seqcount_init(&device->data_seqcount)
#else
#define btrfs_device_data_ordered_init(device, info) do { } while (0)
#define btrfs_device_data_ordered_init(device) do { } while (0)
#endif

#define BTRFS_DEV_STATE_WRITEABLE	(0)

@@ -76,8 +76,7 @@ struct btrfs_device {
	blk_status_t last_flush_error;

#ifdef __BTRFS_NEED_DEVICE_DATA_ORDERED
	/* A seqcount_t with associated chunk_mutex (for lockdep) */
	seqcount_mutex_t data_seqcount;
	seqcount_t data_seqcount;
#endif

	/* the internal btrfs device id */

@@ -168,9 +167,11 @@ btrfs_device_get_##name(const struct btrfs_device *dev) \
static inline void \
btrfs_device_set_##name(struct btrfs_device *dev, u64 size) \
{ \
	preempt_disable(); \
	write_seqcount_begin(&dev->data_seqcount); \
	dev->name = size; \
	write_seqcount_end(&dev->data_seqcount); \
	preempt_enable(); \
}
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
#define BTRFS_DEVICE_GETSET_FUNCS(name) \
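The volumes.h change switches the 32-bit device-size accessors from a mutex-backed seqcount to a plain seqcount with explicit preempt_disable(), so writers stay non-preemptible while readers retry if they observe a torn update. A user-space approximation of the reader/writer retry protocol, using C11 atomics rather than the kernel's seqcount_t API, is sketched below; a production version would need stronger fencing than this illustration provides.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Minimal single-writer sequence counter protecting a 64-bit value, in the
 * spirit of the BTRFS_DEVICE_GETSET_FUNCS() accessors on 32-bit builds.
 */
struct seq_u64 {
	atomic_uint seq;	/* odd while an update is in progress */
	uint64_t val;
};

static void seq_u64_set(struct seq_u64 *s, uint64_t v)
{
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);	/* -> odd  */
	s->val = v;
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);	/* -> even */
}

static uint64_t seq_u64_get(const struct seq_u64 *s)
{
	unsigned int start;
	uint64_t v;

	/* Retry until we read the value with a stable, even sequence count. */
	do {
		start = atomic_load_explicit(&s->seq, memory_order_acquire);
		v = s->val;
	} while ((start & 1) ||
		 start != atomic_load_explicit(&s->seq, memory_order_acquire));
	return v;
}

int main(void)
{
	struct seq_u64 dev_size = { .seq = 0, .val = 0 };

	seq_u64_set(&dev_size, 1ULL << 40);
	printf("%llu\n", (unsigned long long)seq_u64_get(&dev_size));
	return 0;
}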
@@ -1026,6 +1026,7 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
			     const struct iovec *fast_iov,
			     struct iov_iter *iter, bool force);
static void io_req_drop_files(struct io_kiocb *req);
static void io_req_task_queue(struct io_kiocb *req);

static struct kmem_cache *req_cachep;

@@ -1069,8 +1070,12 @@ static bool io_match_task(struct io_kiocb *head,
{
	struct io_kiocb *req;

	if (task && head->task != task)
	if (task && head->task != task) {
		/* in terms of cancelation, always match if req task is dead */
		if (head->task->flags & PF_EXITING)
			return true;
		return false;
	}
	if (!files)
		return true;

@@ -1630,18 +1635,11 @@ static void __io_queue_deferred(struct io_ring_ctx *ctx)
	do {
		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
						struct io_defer_entry, list);
		struct io_kiocb *link;

		if (req_need_defer(de->req, de->seq))
			break;
		list_del_init(&de->list);
		/* punt-init is done before queueing for defer */
		link = __io_queue_async_work(de->req);
		if (link) {
			__io_queue_linked_timeout(link);
			/* drop submission reference */
			io_put_req_deferred(link, 1);
		}
		io_req_task_queue(de->req);
		kfree(de);
	} while (!list_empty(&ctx->defer_list));
}

@@ -1775,12 +1773,13 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
	struct io_kiocb *req, *tmp;
	struct io_uring_cqe *cqe;
	unsigned long flags;
	bool all_flushed;
	bool all_flushed, posted;
	LIST_HEAD(list);

	if (!force && __io_cqring_events(ctx) == rings->cq_ring_entries)
		return false;

	posted = false;
	spin_lock_irqsave(&ctx->completion_lock, flags);
	list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) {
		if (!io_match_task(req, tsk, files))

@@ -1800,6 +1799,7 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
			WRITE_ONCE(ctx->rings->cq_overflow,
				   ctx->cached_cq_overflow);
		}
		posted = true;
	}

	all_flushed = list_empty(&ctx->cq_overflow_list);

@@ -1809,9 +1809,11 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
		ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
	}

	io_commit_cqring(ctx);
	if (posted)
		io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	io_cqring_ev_posted(ctx);
	if (posted)
		io_cqring_ev_posted(ctx);

	while (!list_empty(&list)) {
		req = list_first_entry(&list, struct io_kiocb, compl.list);

@@ -6458,7 +6460,8 @@ static struct file *io_file_get(struct io_submit_state *state,
		file = __io_file_get(state, fd);
	}

	if (file && file->f_op == &io_uring_fops) {
	if (file && file->f_op == &io_uring_fops &&
	    !(req->flags & REQ_F_INFLIGHT)) {
		io_req_init_async(req);
		req->flags |= REQ_F_INFLIGHT;

@@ -7266,14 +7269,18 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
						TASK_INTERRUPTIBLE);
		/* make sure we run task_work before checking for signals */
		ret = io_run_task_work_sig();
		if (ret > 0)
		if (ret > 0) {
			finish_wait(&ctx->wait, &iowq.wq);
			continue;
		}
		else if (ret < 0)
			break;
		if (io_should_wake(&iowq))
			break;
		if (test_bit(0, &ctx->cq_check_overflow))
		if (test_bit(0, &ctx->cq_check_overflow)) {
			finish_wait(&ctx->wait, &iowq.wq);
			continue;
		}
		if (uts) {
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {

@@ -8865,30 +8872,31 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
	}
}

static int io_uring_count_inflight(struct io_ring_ctx *ctx,
				   struct task_struct *task,
				   struct files_struct *files)
{
	struct io_kiocb *req;
	int cnt = 0;

	spin_lock_irq(&ctx->inflight_lock);
	list_for_each_entry(req, &ctx->inflight_list, inflight_entry)
		cnt += io_match_task(req, task, files);
	spin_unlock_irq(&ctx->inflight_lock);
	return cnt;
}

static void io_uring_cancel_files(struct io_ring_ctx *ctx,
				  struct task_struct *task,
				  struct files_struct *files)
{
	while (!list_empty_careful(&ctx->inflight_list)) {
		struct io_task_cancel cancel = { .task = task, .files = files };
		struct io_kiocb *req;
		DEFINE_WAIT(wait);
		bool found = false;
		int inflight;

		spin_lock_irq(&ctx->inflight_lock);
		list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
			if (!io_match_task(req, task, files))
				continue;
			found = true;
			break;
		}
		if (found)
			prepare_to_wait(&task->io_uring->wait, &wait,
					TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&ctx->inflight_lock);

		/* We need to keep going until we don't find a matching req */
		if (!found)
		inflight = io_uring_count_inflight(ctx, task, files);
		if (!inflight)
			break;

		io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);

@@ -8897,7 +8905,11 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
		io_cqring_overflow_flush(ctx, true, task, files);
		/* cancellations _may_ trigger task work */
		io_run_task_work();
		schedule();

		prepare_to_wait(&task->io_uring->wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (inflight == io_uring_count_inflight(ctx, task, files))
			schedule();
		finish_wait(&task->io_uring->wait, &wait);
	}
}

@@ -8955,8 +8967,6 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
	struct task_struct *task = current;

	if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
		/* for SQPOLL only sqo_task has task notes */
		WARN_ON_ONCE(ctx->sqo_task != current);
		io_disable_sqo_submit(ctx);
		task = ctx->sq_data->thread;
		atomic_inc(&task->io_uring->in_idle);

@@ -8966,10 +8976,9 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
	io_cancel_defer_files(ctx, task, files);
	io_cqring_overflow_flush(ctx, true, task, files);

	io_uring_cancel_files(ctx, task, files);
	if (!files)
		__io_uring_cancel_task_requests(ctx, task);
	else
		io_uring_cancel_files(ctx, task, files);

	if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
		atomic_dec(&task->io_uring->in_idle);

@@ -9116,16 +9125,15 @@ void __io_uring_task_cancel(void)
		prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);

		/*
		 * If we've seen completions, retry. This avoids a race where
		 * a completion comes in before we did prepare_to_wait().
		 * If we've seen completions, retry without waiting. This
		 * avoids a race where a completion comes in before we did
		 * prepare_to_wait().
		 */
		if (inflight != tctx_inflight(tctx))
			continue;
		schedule();
		if (inflight == tctx_inflight(tctx))
			schedule();
		finish_wait(&tctx->wait, &wait);
	} while (1);

	finish_wait(&tctx->wait, &wait);
	atomic_dec(&tctx->in_idle);

	io_uring_remove_task_files(tctx);

@@ -9136,6 +9144,9 @@ static int io_uring_flush(struct file *file, void *data)
	struct io_uring_task *tctx = current->io_uring;
	struct io_ring_ctx *ctx = file->private_data;

	if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
		io_uring_cancel_task_requests(ctx, NULL);

	if (!tctx)
		return 0;
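Several of the io_uring hunks above replace "sleep, then recheck" with "recheck, and only sleep if the in-flight count has not changed since the last look", which closes the window where a completion arrives between the check and the sleep. A user-space sketch of that idea, using a pthread mutex/condvar pair instead of the kernel's waitqueues, is below; the counter and thread names are invented for illustration.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Invented stand-ins for the ring's in-flight accounting. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done_cv = PTHREAD_COND_INITIALIZER;
static int inflight = 3;

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 3; i++) {
		usleep(1000);			/* pretend to complete a request */
		pthread_mutex_lock(&lock);
		inflight--;
		pthread_cond_broadcast(&done_cv);
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

/* Only sleep if nothing completed between sampling the counter and waiting. */
static void wait_for_cancelation(void)
{
	for (;;) {
		pthread_mutex_lock(&lock);
		int sampled = inflight;
		pthread_mutex_unlock(&lock);

		if (sampled == 0)
			break;

		/* cancellation / flush work would run here, unlocked */

		pthread_mutex_lock(&lock);
		if (sampled == inflight)	/* nothing retired meanwhile: sleep */
			pthread_cond_wait(&done_cv, &lock);
		pthread_mutex_unlock(&lock);
	}
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	wait_for_cancelation();
	pthread_join(t, NULL);
	printf("all requests retired\n");
	return 0;
}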
@@ -1134,7 +1134,6 @@ int kernel_kexec(void)

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		lock_system_sleep();
		pm_prepare_console();
		error = freeze_processes();
		if (error) {

@@ -1197,7 +1196,6 @@ int kernel_kexec(void)
		thaw_processes();
 Restore_console:
		pm_restore_console();
		unlock_system_sleep();
	}
#endif
@@ -497,10 +497,10 @@ static int swap_writer_finish(struct swap_map_handle *handle,
				unsigned int flags, int error)
{
	if (!error) {
		flush_swap_writer(handle);
		pr_info("S");
		error = mark_swapfiles(handle, flags);
		pr_cont("|\n");
		flush_swap_writer(handle);
	}

	if (error)