mirror of https://gitee.com/openkylin/linux.git
Merge branch 'for-3.19/drivers' of git://git.kernel.dk/linux-block
Pull block layer driver updates from Jens Axboe:

 - NVMe updates:
	- The blk-mq conversion from Matias (and others)
	- A stack of NVMe bug fixes from the nvme tree, mostly from Keith.
	- Various bug fixes from me, fixing issues in both the blk-mq
	  conversion and generic bugs.
	- Abort and CPU online fix from Sam.
	- Hot add/remove fix from Indraneel.

 - A couple of drbd fixes from the drbd team (Andreas, Lars, Philipp)

 - With the generic IO stat accounting from 3.19/core, converting md,
   bcache, and rsxx to use those. From Gu Zheng.

 - Boundary check for queue/irq mode for null_blk from Matias. Fixes
   cases where invalid values could be given, causing the device to hang.

 - The xen blkfront pull request, with two bug fixes from Vitaly.

* 'for-3.19/drivers' of git://git.kernel.dk/linux-block: (56 commits)
  NVMe: fix race condition in nvme_submit_sync_cmd()
  NVMe: fix retry/error logic in nvme_queue_rq()
  NVMe: Fix FS mount issue (hot-remove followed by hot-add)
  NVMe: fix error return checking from blk_mq_alloc_request()
  NVMe: fix freeing of wrong request in abort path
  xen/blkfront: remove redundant flush_op
  xen/blkfront: improve protection against issuing unsupported REQ_FUA
  NVMe: Fix command setup on IO retry
  null_blk: boundary check queue_mode and irqmode
  block/rsxx: use generic io stats accounting functions to simplify io stat accounting
  md: use generic io stats accounting functions to simplify io stat accounting
  drbd: use generic io stats accounting functions to simplify io stat accounting
  md/bcache: use generic io stats accounting functions to simplify io stat accounting
  NVMe: Update module version major number
  NVMe: fail pci initialization if the device doesn't have any BARs
  NVMe: add ->exit_hctx() hook
  NVMe: make setup work for devices that don't do INTx
  NVMe: enable IO stats by default
  NVMe: nvme_submit_async_admin_req() must use atomic rq allocation
  NVMe: replace blk_put_request() with blk_mq_free_request()
  ...
This commit is contained in: commit 9ea18f8cab
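Several of the conversions below replace open-coded part_stat_* sequences with the generic I/O accounting helpers added in 3.19/core. As a minimal sketch of the before/after pattern (assuming the 3.19-era signatures generic_start_io_acct(int rw, unsigned long sectors, struct hd_struct *part) and generic_end_io_acct(int rw, struct hd_struct *part, unsigned long start_time); "my_dev" and its fields are hypothetical placeholders for a driver's per-device state, not names from this pull):

/* Sketch only: illustrates the conversion pattern used by the md/bcache/rsxx/drbd
 * patches in this pull; "my_dev" is a made-up placeholder structure. */
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/jiffies.h>

struct my_dev {
	struct gendisk *disk;
};

static void my_start_io_acct(struct my_dev *dev, struct bio *bio)
{
	/* Replaces part_stat_lock()/part_stat_inc()/part_stat_add()/
	 * part_inc_in_flight()/part_stat_unlock() with one helper call. */
	generic_start_io_acct(bio_data_dir(bio), bio_sectors(bio),
			      &dev->disk->part0);
}

static void my_end_io_acct(struct my_dev *dev, struct bio *bio,
			   unsigned long start_time)
{
	/* Replaces the ticks[] and in_flight bookkeeping on completion. */
	generic_end_io_acct(bio_data_dir(bio), &dev->disk->part0, start_time);
}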
@@ -827,8 +827,7 @@ static int update_sync_bits(struct drbd_device *device,
  *
  */
 int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
-		enum update_sync_bits_mode mode,
-		const char *file, const unsigned int line)
+		enum update_sync_bits_mode mode)
 {
 	/* Is called from worker and receiver context _only_ */
 	unsigned long sbnr, ebnr, lbnr;
@@ -1454,7 +1454,6 @@ extern int is_valid_ar_handle(struct drbd_request *, sector_t);
 
 
 /* drbd_nl.c */
-extern int drbd_msg_put_info(struct sk_buff *skb, const char *info);
 extern void drbd_suspend_io(struct drbd_device *device);
 extern void drbd_resume_io(struct drbd_device *device);
 extern char *ppsize(char *buf, unsigned long long size);
@@ -1558,52 +1557,31 @@ extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
 extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
 extern int drbd_connected(struct drbd_peer_device *);
 
-/* Yes, there is kernel_setsockopt, but only since 2.6.18.
- * So we have our own copy of it here. */
-static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
-				  char *optval, int optlen)
-{
-	mm_segment_t oldfs = get_fs();
-	char __user *uoptval;
-	int err;
-
-	uoptval = (char __user __force *)optval;
-
-	set_fs(KERNEL_DS);
-	if (level == SOL_SOCKET)
-		err = sock_setsockopt(sock, level, optname, uoptval, optlen);
-	else
-		err = sock->ops->setsockopt(sock, level, optname, uoptval,
-					    optlen);
-	set_fs(oldfs);
-	return err;
-}
-
 static inline void drbd_tcp_cork(struct socket *sock)
 {
 	int val = 1;
-	(void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
+	(void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK,
 			(char*)&val, sizeof(val));
 }
 
 static inline void drbd_tcp_uncork(struct socket *sock)
 {
 	int val = 0;
-	(void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
+	(void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK,
 			(char*)&val, sizeof(val));
 }
 
 static inline void drbd_tcp_nodelay(struct socket *sock)
 {
	int val = 1;
-	(void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
+	(void) kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
 			(char*)&val, sizeof(val));
 }
 
 static inline void drbd_tcp_quickack(struct socket *sock)
 {
 	int val = 2;
-	(void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
+	(void) kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
 			(char*)&val, sizeof(val));
 }
 
@@ -1662,14 +1640,13 @@ extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long stil
 
 enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC };
 extern int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
-		enum update_sync_bits_mode mode,
-		const char *file, const unsigned int line);
+		enum update_sync_bits_mode mode);
 #define drbd_set_in_sync(device, sector, size) \
-	__drbd_change_sync(device, sector, size, SET_IN_SYNC, __FILE__, __LINE__)
+	__drbd_change_sync(device, sector, size, SET_IN_SYNC)
 #define drbd_set_out_of_sync(device, sector, size) \
-	__drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC, __FILE__, __LINE__)
+	__drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC)
 #define drbd_rs_failed_io(device, sector, size) \
-	__drbd_change_sync(device, sector, size, RECORD_RS_FAILED, __FILE__, __LINE__)
+	__drbd_change_sync(device, sector, size, RECORD_RS_FAILED)
 extern void drbd_al_shrink(struct drbd_device *device);
 extern int drbd_initialize_al(struct drbd_device *, void *);
 
@@ -2532,10 +2532,6 @@ int set_resource_options(struct drbd_resource *resource, struct res_opts *res_op
 
 	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
 		return -ENOMEM;
-		/*
-		retcode = ERR_NOMEM;
-		drbd_msg_put_info("unable to allocate cpumask");
-		*/
 
 	/* silently ignore cpu mask on UP kernel */
 	if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
@@ -2731,7 +2727,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
 
 	device = minor_to_device(minor);
 	if (device)
-		return ERR_MINOR_EXISTS;
+		return ERR_MINOR_OR_VOLUME_EXISTS;
 
 	/* GFP_KERNEL, we are outside of all write-out paths */
 	device = kzalloc(sizeof(struct drbd_device), GFP_KERNEL);
@@ -2793,20 +2789,16 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
 
 	id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL);
 	if (id < 0) {
-		if (id == -ENOSPC) {
-			err = ERR_MINOR_EXISTS;
-			drbd_msg_put_info(adm_ctx->reply_skb, "requested minor exists already");
-		}
+		if (id == -ENOSPC)
+			err = ERR_MINOR_OR_VOLUME_EXISTS;
 		goto out_no_minor_idr;
 	}
 	kref_get(&device->kref);
 
 	id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL);
 	if (id < 0) {
-		if (id == -ENOSPC) {
-			err = ERR_MINOR_EXISTS;
-			drbd_msg_put_info(adm_ctx->reply_skb, "requested minor exists already");
-		}
+		if (id == -ENOSPC)
+			err = ERR_MINOR_OR_VOLUME_EXISTS;
 		goto out_idr_remove_minor;
 	}
 	kref_get(&device->kref);
 
@@ -2825,10 +2817,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
 
 	id = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL);
 	if (id < 0) {
-		if (id == -ENOSPC) {
+		if (id == -ENOSPC)
 			err = ERR_INVALID_REQUEST;
-			drbd_msg_put_info(adm_ctx->reply_skb, "requested volume exists already");
-		}
 		goto out_idr_remove_from_resource;
 	}
 	kref_get(&connection->kref);
@@ -2836,7 +2826,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
 
 	if (init_submitter(device)) {
 		err = ERR_NOMEM;
-		drbd_msg_put_info(adm_ctx->reply_skb, "unable to create submit workqueue");
 		goto out_idr_remove_vol;
 	}
 
@@ -92,7 +92,7 @@ static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
 
 /* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
  * reason it could fail was no space in skb, and there are 4k available. */
-int drbd_msg_put_info(struct sk_buff *skb, const char *info)
+static int drbd_msg_put_info(struct sk_buff *skb, const char *info)
 {
 	struct nlattr *nla;
 	int err = -EMSGSIZE;
 
@@ -588,7 +588,7 @@ drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int for
 	val.i  = 0; val.role = new_role;
 
 	while (try++ < max_tries) {
-		rv = _drbd_request_state(device, mask, val, CS_WAIT_COMPLETE);
+		rv = _drbd_request_state_holding_state_mutex(device, mask, val, CS_WAIT_COMPLETE);
 
 		/* in case we first succeeded to outdate,
 		 * but now suddenly could establish a connection */
@@ -2052,7 +2052,7 @@ check_net_options(struct drbd_connection *connection, struct net_conf *new_net_c
 	rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_net_conf);
 	rcu_read_unlock();
 
-	/* connection->volumes protected by genl_lock() here */
+	/* connection->peer_devices protected by genl_lock() here */
 	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
 		struct drbd_device *device = peer_device->device;
 		if (!device->bitmap) {
@@ -3483,7 +3483,7 @@ int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
 	 * that first_peer_device(device)->connection and device->vnr match the request. */
 	if (adm_ctx.device) {
 		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
-			retcode = ERR_MINOR_EXISTS;
+			retcode = ERR_MINOR_OR_VOLUME_EXISTS;
 		/* else: still NO_ERROR */
 		goto out;
 	}
@@ -3530,6 +3530,27 @@ int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info)
 	return 0;
 }
 
+static int adm_del_resource(struct drbd_resource *resource)
+{
+	struct drbd_connection *connection;
+
+	for_each_connection(connection, resource) {
+		if (connection->cstate > C_STANDALONE)
+			return ERR_NET_CONFIGURED;
+	}
+	if (!idr_is_empty(&resource->devices))
+		return ERR_RES_IN_USE;
+
+	list_del_rcu(&resource->resources);
+	/* Make sure all threads have actually stopped: state handling only
+	 * does drbd_thread_stop_nowait(). */
+	list_for_each_entry(connection, &resource->connections, connections)
+		drbd_thread_stop(&connection->worker);
+	synchronize_rcu();
+	drbd_free_resource(resource);
+	return NO_ERROR;
+}
+
 int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
 {
 	struct drbd_config_context adm_ctx;
@@ -3575,14 +3596,6 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
 		}
 	}
 
-	/* If we reach this, all volumes (of this connection) are Secondary,
-	 * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
-	 * actually stopped, state handling only does drbd_thread_stop_nowait(). */
-	for_each_connection(connection, resource)
-		drbd_thread_stop(&connection->worker);
-
-	/* Now, nothing can fail anymore */
-
 	/* delete volumes */
 	idr_for_each_entry(&resource->devices, device, i) {
 		retcode = adm_del_minor(device);
@@ -3593,10 +3606,7 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
 		}
 	}
 
-	list_del_rcu(&resource->resources);
-	synchronize_rcu();
-	drbd_free_resource(resource);
-	retcode = NO_ERROR;
+	retcode = adm_del_resource(resource);
 out:
 	mutex_unlock(&resource->adm_mutex);
 finish:
@@ -3608,7 +3618,6 @@ int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
 {
 	struct drbd_config_context adm_ctx;
 	struct drbd_resource *resource;
-	struct drbd_connection *connection;
 	enum drbd_ret_code retcode;
 
 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
@@ -3616,27 +3625,10 @@ int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
 		return retcode;
 	if (retcode != NO_ERROR)
 		goto finish;
 
 	resource = adm_ctx.resource;
-	mutex_lock(&resource->adm_mutex);
-	for_each_connection(connection, resource) {
-		if (connection->cstate > C_STANDALONE) {
-			retcode = ERR_NET_CONFIGURED;
-			goto out;
-		}
-	}
-	if (!idr_is_empty(&resource->devices)) {
-		retcode = ERR_RES_IN_USE;
-		goto out;
-	}
-
-	list_del_rcu(&resource->resources);
-	for_each_connection(connection, resource)
-		drbd_thread_stop(&connection->worker);
-	synchronize_rcu();
-	drbd_free_resource(resource);
-	retcode = NO_ERROR;
-out:
+	mutex_lock(&resource->adm_mutex);
+	retcode = adm_del_resource(resource);
 	mutex_unlock(&resource->adm_mutex);
 finish:
 	drbd_adm_finish(&adm_ctx, info, retcode);
@@ -2482,7 +2482,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
 			atomic_read(&device->rs_sect_ev);
 
 	if (atomic_read(&device->ap_actlog_cnt)
-	    || !device->rs_last_events || curr_events - device->rs_last_events > 64) {
+	    || curr_events - device->rs_last_events > 64) {
 		unsigned long rs_left;
 		int i;
 
@@ -36,29 +36,15 @@ static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector,
 /* Update disk stats at start of I/O request */
 static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req)
 {
-	const int rw = bio_data_dir(req->master_bio);
-	int cpu;
-	cpu = part_stat_lock();
-	part_round_stats(cpu, &device->vdisk->part0);
-	part_stat_inc(cpu, &device->vdisk->part0, ios[rw]);
-	part_stat_add(cpu, &device->vdisk->part0, sectors[rw], req->i.size >> 9);
-	(void) cpu; /* The macro invocations above want the cpu argument, I do not like
-		       the compiler warning about cpu only assigned but never used... */
-	part_inc_in_flight(&device->vdisk->part0, rw);
-	part_stat_unlock();
+	generic_start_io_acct(bio_data_dir(req->master_bio), req->i.size >> 9,
+			      &device->vdisk->part0);
 }
 
 /* Update disk stats when completing request upwards */
 static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req)
 {
-	int rw = bio_data_dir(req->master_bio);
-	unsigned long duration = jiffies - req->start_jif;
-	int cpu;
-	cpu = part_stat_lock();
-	part_stat_add(cpu, &device->vdisk->part0, ticks[rw], duration);
-	part_round_stats(cpu, &device->vdisk->part0);
-	part_dec_in_flight(&device->vdisk->part0, rw);
-	part_stat_unlock();
+	generic_end_io_acct(bio_data_dir(req->master_bio),
+			    &device->vdisk->part0, req->start_jif);
 }
 
 static struct drbd_request *drbd_req_new(struct drbd_device *device,
@@ -1545,6 +1531,7 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
 		struct request_queue * const b =
 			device->ldev->backing_bdev->bd_disk->queue;
 		if (b->merge_bvec_fn) {
+			bvm->bi_bdev = device->ldev->backing_bdev;
 			backing_limit = b->merge_bvec_fn(b, bvm, bvec);
 			limit = min(limit, backing_limit);
 		}
@@ -1628,7 +1615,7 @@ void request_timer_fn(unsigned long data)
 		 time_after(now, req_peer->pre_send_jif + ent) &&
 		!time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent)) {
 		drbd_warn(device, "Remote failed to finish a request within ko-count * timeout\n");
-		_drbd_set_state(_NS(device, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
+		_conn_request_state(connection, NS(conn, C_TIMEOUT), CS_VERBOSE | CS_HARD);
 	}
 	if (dt && oldest_submit_jif != now &&
 		 time_after(now, oldest_submit_jif + dt) &&
@@ -215,6 +215,18 @@ static bool no_peer_wf_report_params(struct drbd_connection *connection)
 	return rv;
 }
 
+static void wake_up_all_devices(struct drbd_connection *connection)
+{
+	struct drbd_peer_device *peer_device;
+	int vnr;
+
+	rcu_read_lock();
+	idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
+		wake_up(&peer_device->device->state_wait);
+	rcu_read_unlock();
+
+}
+
 
 /**
  * cl_wide_st_chg() - true if the state change is a cluster wide one
@@ -410,6 +422,22 @@ _drbd_request_state(struct drbd_device *device, union drbd_state mask,
 	return rv;
 }
 
+enum drbd_state_rv
+_drbd_request_state_holding_state_mutex(struct drbd_device *device, union drbd_state mask,
+		    union drbd_state val, enum chg_state_flags f)
+{
+	enum drbd_state_rv rv;
+
+	BUG_ON(f & CS_SERIALIZE);
+
+	wait_event_cmd(device->state_wait,
+		       (rv = drbd_req_state(device, mask, val, f)) != SS_IN_TRANSIENT_STATE,
+		       mutex_unlock(device->state_mutex),
+		       mutex_lock(device->state_mutex));
+
+	return rv;
+}
+
 static void print_st(struct drbd_device *device, const char *name, union drbd_state ns)
 {
 	drbd_err(device, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n",
@@ -629,14 +657,11 @@ is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_c
 	if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
 		rv = SS_IN_TRANSIENT_STATE;
 
-	/* if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
-		rv = SS_IN_TRANSIENT_STATE; */
-
 	/* While establishing a connection only allow cstate to change.
-	   Delay/refuse role changes, detach attach etc... */
+	   Delay/refuse role changes, detach attach etc... (they do not touch cstate) */
 	if (test_bit(STATE_SENT, &connection->flags) &&
-	    !(os.conn == C_WF_REPORT_PARAMS ||
-	      (ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION)))
+	    !((ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION) ||
+	      (ns.conn >= C_CONNECTED && os.conn == C_WF_REPORT_PARAMS)))
 		rv = SS_IN_TRANSIENT_STATE;
 
 	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
@@ -1032,8 +1057,10 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
 
 	/* Wake up role changes, that were delayed because of connection establishing */
 	if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS &&
-	    no_peer_wf_report_params(connection))
+	    no_peer_wf_report_params(connection)) {
 		clear_bit(STATE_SENT, &connection->flags);
+		wake_up_all_devices(connection);
+	}
 
 	wake_up(&device->misc_wait);
 	wake_up(&device->state_wait);
@@ -1072,7 +1099,6 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
 
 		set_ov_position(device, ns.conn);
 		device->rs_start = now;
-		device->rs_last_events = 0;
 		device->rs_last_sect_ev = 0;
 		device->ov_last_oos_size = 0;
 		device->ov_last_oos_start = 0;
@@ -117,6 +117,11 @@ extern enum drbd_state_rv _drbd_request_state(struct drbd_device *,
 					      union drbd_state,
 					      union drbd_state,
 					      enum chg_state_flags);
+
+extern enum drbd_state_rv
+_drbd_request_state_holding_state_mutex(struct drbd_device *, union drbd_state,
+					union drbd_state, enum chg_state_flags);
+
 extern enum drbd_state_rv __drbd_set_state(struct drbd_device *, union drbd_state,
 					   enum chg_state_flags,
 					   struct completion *done);
@@ -1592,11 +1592,15 @@ void drbd_resync_after_changed(struct drbd_device *device)
 
 void drbd_rs_controller_reset(struct drbd_device *device)
 {
+	struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
 	struct fifo_buffer *plan;
 
 	atomic_set(&device->rs_sect_in, 0);
 	atomic_set(&device->rs_sect_ev, 0);
 	device->rs_in_flight = 0;
+	device->rs_last_events =
+		(int)part_stat_read(&disk->part0, sectors[0]) +
+		(int)part_stat_read(&disk->part0, sectors[1]);
 
 	/* Updating the RCU protected object in place is necessary since
 	   this function gets called from atomic context.
@@ -1743,7 +1747,6 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
 	device->rs_failed    = 0;
 	device->rs_paused    = 0;
 	device->rs_same_csum = 0;
-	device->rs_last_events = 0;
 	device->rs_last_sect_ev = 0;
 	device->rs_total     = tw;
 	device->rs_start     = now;
@@ -78,7 +78,33 @@ module_param(home_node, int, S_IRUGO);
 MODULE_PARM_DESC(home_node, "Home node for the device");
 
 static int queue_mode = NULL_Q_MQ;
-module_param(queue_mode, int, S_IRUGO);
+
+static int null_param_store_val(const char *str, int *val, int min, int max)
+{
+	int ret, new_val;
+
+	ret = kstrtoint(str, 10, &new_val);
+	if (ret)
+		return -EINVAL;
+
+	if (new_val < min || new_val > max)
+		return -EINVAL;
+
+	*val = new_val;
+	return 0;
+}
+
+static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
+{
+	return null_param_store_val(str, &queue_mode, NULL_Q_BIO, NULL_Q_MQ);
+}
+
+static struct kernel_param_ops null_queue_mode_param_ops = {
+	.set	= null_set_queue_mode,
+	.get	= param_get_int,
+};
+
+device_param_cb(queue_mode, &null_queue_mode_param_ops, &queue_mode, S_IRUGO);
 MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");
 
 static int gb = 250;
@@ -94,7 +120,19 @@ module_param(nr_devices, int, S_IRUGO);
 MODULE_PARM_DESC(nr_devices, "Number of devices to register");
 
 static int irqmode = NULL_IRQ_SOFTIRQ;
-module_param(irqmode, int, S_IRUGO);
+
+static int null_set_irqmode(const char *str, const struct kernel_param *kp)
+{
+	return null_param_store_val(str, &irqmode, NULL_IRQ_NONE,
+					NULL_IRQ_TIMER);
+}
+
+static struct kernel_param_ops null_irqmode_param_ops = {
+	.set	= null_set_irqmode,
+	.get	= param_get_int,
+};
+
+device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
 MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");
 
 static int completion_nsec = 10000;
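The two null_blk hunks above implement the boundary check called out in the pull message: the raw module_param() declarations are replaced with custom kernel_param_ops whose setter rejects out-of-range values. A hedged, self-contained sketch of that pattern follows; the "example_mode" parameter, its 0..2 range, and the use of module_param_cb() are illustrative assumptions, not code from the driver:

/* Sketch of a bounds-checked module parameter via kernel_param_ops;
 * names and range are hypothetical, only the mechanism mirrors null_blk. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static int example_mode = 1;

static int example_set_mode(const char *str, const struct kernel_param *kp)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;
	if (new_val < 0 || new_val > 2)	/* reject values outside 0..2 */
		return -EINVAL;

	*(int *)kp->arg = new_val;	/* kp->arg points at example_mode */
	return 0;
}

static const struct kernel_param_ops example_mode_ops = {
	.set	= example_set_mode,
	.get	= param_get_int,
};

module_param_cb(example_mode, &example_mode_ops, &example_mode, S_IRUGO);
MODULE_PARM_DESC(example_mode, "Example bounded mode parameter (0-2)");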
[File diff suppressed because it is too large]
@@ -2105,7 +2105,7 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 
 		nvme_offset += unit_num_blocks;
 
-		nvme_sc = nvme_submit_io_cmd(dev, &c, NULL);
+		nvme_sc = nvme_submit_io_cmd(dev, ns, &c, NULL);
 		if (nvme_sc != NVME_SC_SUCCESS) {
 			nvme_unmap_user_pages(dev,
 				(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
@@ -2658,7 +2658,7 @@ static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 		c.common.opcode = nvme_cmd_flush;
 		c.common.nsid = cpu_to_le32(ns->ns_id);
 
-		nvme_sc = nvme_submit_io_cmd(ns->dev, &c, NULL);
+		nvme_sc = nvme_submit_io_cmd(ns->dev, ns, &c, NULL);
 		res = nvme_trans_status_code(hdr, nvme_sc);
 		if (res)
 			goto out;
@@ -2686,7 +2686,7 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
 	c.common.opcode = nvme_cmd_flush;
 	c.common.nsid = cpu_to_le32(ns->ns_id);
 
-	nvme_sc = nvme_submit_io_cmd(ns->dev, &c, NULL);
+	nvme_sc = nvme_submit_io_cmd(ns->dev, ns, &c, NULL);
 
 	res = nvme_trans_status_code(hdr, nvme_sc);
 	if (res)
@@ -2894,7 +2894,7 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	c.dsm.nr = cpu_to_le32(ndesc - 1);
 	c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
 
-	nvme_sc = nvme_submit_io_cmd(dev, &c, NULL);
+	nvme_sc = nvme_submit_io_cmd(dev, ns, &c, NULL);
 	res = nvme_trans_status_code(hdr, nvme_sc);
 
 	dma_free_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
@@ -2915,6 +2915,14 @@ static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
 	if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
 		return -EFAULT;
 
+	/*
+	 * Prime the hdr with good status for scsi commands that don't require
+	 * an nvme command for translation.
+	 */
+	retcode = nvme_trans_status_code(hdr, NVME_SC_SUCCESS);
+	if (retcode)
+		return retcode;
+
 	opcode = cmd[0];
 
 	switch (opcode) {
@@ -3016,152 +3024,6 @@ int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
 	return retcode;
 }
 
-#ifdef CONFIG_COMPAT
-typedef struct sg_io_hdr32 {
-	compat_int_t interface_id;	/* [i] 'S' for SCSI generic (required) */
-	compat_int_t dxfer_direction;	/* [i] data transfer direction  */
-	unsigned char cmd_len;		/* [i] SCSI command length ( <= 16 bytes) */
-	unsigned char mx_sb_len;	/* [i] max length to write to sbp */
-	unsigned short iovec_count;	/* [i] 0 implies no scatter gather */
-	compat_uint_t dxfer_len;	/* [i] byte count of data transfer */
-	compat_uint_t dxferp;		/* [i], [*io] points to data transfer memory
-					      or scatter gather list */
-	compat_uptr_t cmdp;		/* [i], [*i] points to command to perform */
-	compat_uptr_t sbp;		/* [i], [*o] points to sense_buffer memory */
-	compat_uint_t timeout;		/* [i] MAX_UINT->no timeout (unit: millisec) */
-	compat_uint_t flags;		/* [i] 0 -> default, see SG_FLAG... */
-	compat_int_t pack_id;		/* [i->o] unused internally (normally) */
-	compat_uptr_t usr_ptr;		/* [i->o] unused internally */
-	unsigned char status;		/* [o] scsi status */
-	unsigned char masked_status;	/* [o] shifted, masked scsi status */
-	unsigned char msg_status;	/* [o] messaging level data (optional) */
-	unsigned char sb_len_wr;	/* [o] byte count actually written to sbp */
-	unsigned short host_status;	/* [o] errors from host adapter */
-	unsigned short driver_status;	/* [o] errors from software driver */
-	compat_int_t resid;		/* [o] dxfer_len - actual_transferred */
-	compat_uint_t duration;		/* [o] time taken by cmd (unit: millisec) */
-	compat_uint_t info;		/* [o] auxiliary information */
-} sg_io_hdr32_t;  /* 64 bytes long (on sparc32) */
-
-typedef struct sg_iovec32 {
-	compat_uint_t iov_base;
-	compat_uint_t iov_len;
-} sg_iovec32_t;
-
-static int sg_build_iovec(sg_io_hdr_t __user *sgio, void __user *dxferp, u16 iovec_count)
-{
-	sg_iovec_t __user *iov = (sg_iovec_t __user *) (sgio + 1);
-	sg_iovec32_t __user *iov32 = dxferp;
-	int i;
-
-	for (i = 0; i < iovec_count; i++) {
-		u32 base, len;
-
-		if (get_user(base, &iov32[i].iov_base) ||
-		    get_user(len, &iov32[i].iov_len) ||
-		    put_user(compat_ptr(base), &iov[i].iov_base) ||
-		    put_user(len, &iov[i].iov_len))
-			return -EFAULT;
-	}
-
-	if (put_user(iov, &sgio->dxferp))
-		return -EFAULT;
-	return 0;
-}
-
-int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg)
-{
-	sg_io_hdr32_t __user *sgio32 = (sg_io_hdr32_t __user *)arg;
-	sg_io_hdr_t __user *sgio;
-	u16 iovec_count;
-	u32 data;
-	void __user *dxferp;
-	int err;
-	int interface_id;
-
-	if (get_user(interface_id, &sgio32->interface_id))
-		return -EFAULT;
-	if (interface_id != 'S')
-		return -EINVAL;
-
-	if (get_user(iovec_count, &sgio32->iovec_count))
-		return -EFAULT;
-
-	{
-		void __user *top = compat_alloc_user_space(0);
-		void __user *new = compat_alloc_user_space(sizeof(sg_io_hdr_t) +
-				(iovec_count * sizeof(sg_iovec_t)));
-		if (new > top)
-			return -EINVAL;
-
-		sgio = new;
-	}
-
-	/* Ok, now construct.  */
-	if (copy_in_user(&sgio->interface_id, &sgio32->interface_id,
-			(2 * sizeof(int)) +
-			(2 * sizeof(unsigned char)) +
-			(1 * sizeof(unsigned short)) +
-			(1 * sizeof(unsigned int))))
-		return -EFAULT;
-
-	if (get_user(data, &sgio32->dxferp))
-		return -EFAULT;
-	dxferp = compat_ptr(data);
-	if (iovec_count) {
-		if (sg_build_iovec(sgio, dxferp, iovec_count))
-			return -EFAULT;
-	} else {
-		if (put_user(dxferp, &sgio->dxferp))
-			return -EFAULT;
-	}
-
-	{
-		unsigned char __user *cmdp;
-		unsigned char __user *sbp;
-
-		if (get_user(data, &sgio32->cmdp))
-			return -EFAULT;
-		cmdp = compat_ptr(data);
-
-		if (get_user(data, &sgio32->sbp))
-			return -EFAULT;
-		sbp = compat_ptr(data);
-
-		if (put_user(cmdp, &sgio->cmdp) ||
-		    put_user(sbp, &sgio->sbp))
-			return -EFAULT;
-	}
-
-	if (copy_in_user(&sgio->timeout, &sgio32->timeout,
-			3 * sizeof(int)))
-		return -EFAULT;
-
-	if (get_user(data, &sgio32->usr_ptr))
-		return -EFAULT;
-	if (put_user(compat_ptr(data), &sgio->usr_ptr))
-		return -EFAULT;
-
-	err = nvme_sg_io(ns, sgio);
-	if (err >= 0) {
-		void __user *datap;
-
-		if (copy_in_user(&sgio32->pack_id, &sgio->pack_id,
-				 sizeof(int)) ||
-		    get_user(datap, &sgio->usr_ptr) ||
-		    put_user((u32)(unsigned long)datap,
-			     &sgio32->usr_ptr) ||
-		    copy_in_user(&sgio32->status, &sgio->status,
-				 (4 * sizeof(unsigned char)) +
-				 (2 * sizeof(unsigned short)) +
-				 (3 * sizeof(int))))
-			err = -EFAULT;
-	}
-
-	return err;
-}
-#endif
-
 int nvme_sg_get_version_num(int __user *ip)
 {
 	return put_user(sg_version_num, ip);
@@ -112,37 +112,16 @@ static const struct block_device_operations rsxx_fops = {
 
 static void disk_stats_start(struct rsxx_cardinfo *card, struct bio *bio)
 {
-	struct hd_struct *part0 = &card->gendisk->part0;
-	int rw = bio_data_dir(bio);
-	int cpu;
-
-	cpu = part_stat_lock();
-
-	part_round_stats(cpu, part0);
-	part_inc_in_flight(part0, rw);
-
-	part_stat_unlock();
+	generic_start_io_acct(bio_data_dir(bio), bio_sectors(bio),
+			     &card->gendisk->part0);
 }
 
 static void disk_stats_complete(struct rsxx_cardinfo *card,
 				struct bio *bio,
 				unsigned long start_time)
 {
-	struct hd_struct *part0 = &card->gendisk->part0;
-	unsigned long duration = jiffies - start_time;
-	int rw = bio_data_dir(bio);
-	int cpu;
-
-	cpu = part_stat_lock();
-
-	part_stat_add(cpu, part0, sectors[rw], bio_sectors(bio));
-	part_stat_inc(cpu, part0, ios[rw]);
-	part_stat_add(cpu, part0, ticks[rw], duration);
-
-	part_round_stats(cpu, part0);
-	part_dec_in_flight(part0, rw);
-
-	part_stat_unlock();
+	generic_end_io_acct(bio_data_dir(bio), &card->gendisk->part0,
+			   start_time);
 }
 
 static void bio_dma_done_cb(struct rsxx_cardinfo *card,
@@ -126,7 +126,6 @@ struct blkfront_info
 	unsigned int persistent_gnts_c;
 	unsigned long shadow_free;
 	unsigned int feature_flush;
-	unsigned int flush_op;
 	unsigned int feature_discard:1;
 	unsigned int feature_secdiscard:1;
 	unsigned int discard_granularity;
@@ -479,7 +478,19 @@ static int blkif_queue_request(struct request *req)
 			 * way.  (It's also a FLUSH+FUA, since it is
 			 * guaranteed ordered WRT previous writes.)
 			 */
-			ring_req->operation = info->flush_op;
+			switch (info->feature_flush &
+				((REQ_FLUSH|REQ_FUA))) {
+			case REQ_FLUSH|REQ_FUA:
+				ring_req->operation =
+					BLKIF_OP_WRITE_BARRIER;
+				break;
+			case REQ_FLUSH:
+				ring_req->operation =
+					BLKIF_OP_FLUSH_DISKCACHE;
+				break;
+			default:
+				ring_req->operation = 0;
+			}
 		}
 		ring_req->u.rw.nr_segments = nseg;
 	}
@@ -582,12 +593,14 @@ static inline void flush_requests(struct blkfront_info *info)
 	notify_remote_via_irq(info->irq);
 }
 
-static inline bool blkif_request_flush_valid(struct request *req,
-					     struct blkfront_info *info)
+static inline bool blkif_request_flush_invalid(struct request *req,
+					       struct blkfront_info *info)
 {
 	return ((req->cmd_type != REQ_TYPE_FS) ||
-		((req->cmd_flags & (REQ_FLUSH | REQ_FUA)) &&
-		!info->flush_op));
+		((req->cmd_flags & REQ_FLUSH) &&
+		 !(info->feature_flush & REQ_FLUSH)) ||
+		((req->cmd_flags & REQ_FUA) &&
+		 !(info->feature_flush & REQ_FUA)));
 }
 
 /*
@@ -612,8 +625,8 @@ static void do_blkif_request(struct request_queue *rq)
 
 		blk_start_request(req);
 
-		if (blkif_request_flush_valid(req, info)) {
-			__blk_end_request_all(req, -EIO);
+		if (blkif_request_flush_invalid(req, info)) {
+			__blk_end_request_all(req, -EOPNOTSUPP);
 			continue;
 		}
 
@@ -683,20 +696,26 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
 	return 0;
 }
 
+static const char *flush_info(unsigned int feature_flush)
+{
+	switch (feature_flush & ((REQ_FLUSH | REQ_FUA))) {
+	case REQ_FLUSH|REQ_FUA:
+		return "barrier: enabled;";
+	case REQ_FLUSH:
+		return "flush diskcache: enabled;";
+	default:
+		return "barrier or flush: disabled;";
+	}
+}
+
 static void xlvbd_flush(struct blkfront_info *info)
 {
 	blk_queue_flush(info->rq, info->feature_flush);
-	printk(KERN_INFO "blkfront: %s: %s: %s %s %s %s %s\n",
-	       info->gd->disk_name,
-	       info->flush_op == BLKIF_OP_WRITE_BARRIER ?
-		"barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ?
-		"flush diskcache" : "barrier or flush"),
-	       info->feature_flush ? "enabled;" : "disabled;",
-	       "persistent grants:",
-	       info->feature_persistent ? "enabled;" : "disabled;",
-	       "indirect descriptors:",
-	       info->max_indirect_segments ? "enabled;" : "disabled;");
+	pr_info("blkfront: %s: %s %s %s %s %s\n",
+		info->gd->disk_name, flush_info(info->feature_flush),
+		"persistent grants:", info->feature_persistent ?
+		"enabled;" : "disabled;", "indirect descriptors:",
+		info->max_indirect_segments ? "enabled;" : "disabled;");
 }
 
 static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
@@ -1188,7 +1207,6 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 				if (error == -EOPNOTSUPP)
 					error = 0;
 				info->feature_flush = 0;
-				info->flush_op = 0;
 				xlvbd_flush(info);
 			}
 			/* fall through */
@@ -1808,7 +1826,6 @@ static void blkfront_connect(struct blkfront_info *info)
 		physical_sector_size = sector_size;
 
 	info->feature_flush = 0;
-	info->flush_op = 0;
 
 	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
 			    "feature-barrier", "%d", &barrier,
@@ -1821,10 +1838,8 @@ static void blkfront_connect(struct blkfront_info *info)
 	 *
 	 * If there are barriers, then we use flush.
 	 */
-	if (!err && barrier) {
+	if (!err && barrier)
 		info->feature_flush = REQ_FLUSH | REQ_FUA;
-		info->flush_op = BLKIF_OP_WRITE_BARRIER;
-	}
 	/*
 	 * And if there is "feature-flush-cache" use that above
 	 * barriers.
@@ -1833,10 +1848,8 @@ static void blkfront_connect(struct blkfront_info *info)
 			    "feature-flush-cache", "%d", &flush,
 			    NULL);
 
-	if (!err && flush) {
+	if (!err && flush)
 		info->feature_flush = REQ_FLUSH;
-		info->flush_op = BLKIF_OP_FLUSH_DISKCACHE;
-	}
 
 	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
 			    "feature-discard", "%d", &discard,
@@ -601,13 +601,8 @@ static void request_endio(struct bio *bio, int error)
 static void bio_complete(struct search *s)
 {
 	if (s->orig_bio) {
-		int cpu, rw = bio_data_dir(s->orig_bio);
-		unsigned long duration = jiffies - s->start_time;
-
-		cpu = part_stat_lock();
-		part_round_stats(cpu, &s->d->disk->part0);
-		part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
-		part_stat_unlock();
+		generic_end_io_acct(bio_data_dir(s->orig_bio),
+				    &s->d->disk->part0, s->start_time);
 
 		trace_bcache_request_end(s->d, s->orig_bio);
 		bio_endio(s->orig_bio, s->iop.error);
@@ -959,12 +954,9 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
 	struct search *s;
 	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
 	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
-	int cpu, rw = bio_data_dir(bio);
+	int rw = bio_data_dir(bio);
 
-	cpu = part_stat_lock();
-	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
-	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
-	part_stat_unlock();
+	generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0);
 
 	bio->bi_bdev = dc->bdev;
 	bio->bi_iter.bi_sector += dc->sb.data_offset;
@@ -1074,12 +1066,9 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 	struct search *s;
 	struct closure *cl;
 	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
-	int cpu, rw = bio_data_dir(bio);
+	int rw = bio_data_dir(bio);
 
-	cpu = part_stat_lock();
-	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
-	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
-	part_stat_unlock();
+	generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0);
 
 	s = search_alloc(bio, d);
 	cl = &s->cl;
@@ -605,13 +605,10 @@ static void end_io_acct(struct dm_io *io)
 	struct mapped_device *md = io->md;
 	struct bio *bio = io->bio;
 	unsigned long duration = jiffies - io->start_time;
-	int pending, cpu;
+	int pending;
 	int rw = bio_data_dir(bio);
 
-	cpu = part_stat_lock();
-	part_round_stats(cpu, &dm_disk(md)->part0);
-	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
-	part_stat_unlock();
+	generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time);
 
 	if (unlikely(dm_stats_used(&md->stats)))
 		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
@@ -1651,16 +1648,12 @@ static void _dm_request(struct request_queue *q, struct bio *bio)
 {
 	int rw = bio_data_dir(bio);
 	struct mapped_device *md = q->queuedata;
-	int cpu;
 	int srcu_idx;
 	struct dm_table *map;
 
 	map = dm_get_live_table(md, &srcu_idx);
 
-	cpu = part_stat_lock();
-	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
-	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
-	part_stat_unlock();
+	generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0);
 
 	/* if we're suspended, we have to queue this io for later */
 	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
@@ -247,7 +247,6 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int rw = bio_data_dir(bio);
 	struct mddev *mddev = q->queuedata;
-	int cpu;
 	unsigned int sectors;
 
 	if (mddev == NULL || mddev->pers == NULL
@@ -284,10 +283,7 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
 	sectors = bio_sectors(bio);
 	mddev->pers->make_request(mddev, bio);
 
-	cpu = part_stat_lock();
-	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
-	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
-	part_stat_unlock();
+	generic_start_io_acct(rw, sectors, &mddev->gendisk->part0);
 
 	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
 		wake_up(&mddev->sb_wait);
@@ -172,7 +172,7 @@ enum drbd_ret_code {
 	ERR_RES_NOT_KNOWN	= 158,
 	ERR_RES_IN_USE		= 159,
 	ERR_MINOR_CONFIGURED    = 160,
-	ERR_MINOR_EXISTS	= 161,
+	ERR_MINOR_OR_VOLUME_EXISTS = 161,
 	ERR_INVALID_REQUEST	= 162,
 	ERR_NEED_APV_100	= 163,
 	ERR_NEED_ALLOW_TWO_PRI	= 164,
@@ -19,6 +19,7 @@
 #include <linux/pci.h>
 #include <linux/miscdevice.h>
 #include <linux/kref.h>
+#include <linux/blk-mq.h>
 
 struct nvme_bar {
 	__u64			cap;	/* Controller Capabilities */
@@ -38,6 +39,7 @@ struct nvme_bar {
 #define NVME_CAP_TIMEOUT(cap)	(((cap) >> 24) & 0xff)
 #define NVME_CAP_STRIDE(cap)	(((cap) >> 32) & 0xf)
 #define NVME_CAP_MPSMIN(cap)	(((cap) >> 48) & 0xf)
+#define NVME_CAP_MPSMAX(cap)	(((cap) >> 52) & 0xf)
 
 enum {
 	NVME_CC_ENABLE		= 1 << 0,
@@ -70,8 +72,10 @@ extern unsigned char nvme_io_timeout;
  */
 struct nvme_dev {
 	struct list_head node;
-	struct nvme_queue __rcu **queues;
-	unsigned short __percpu *io_queue;
+	struct nvme_queue **queues;
+	struct request_queue *admin_q;
+	struct blk_mq_tag_set tagset;
+	struct blk_mq_tag_set admin_tagset;
 	u32 __iomem *dbs;
 	struct pci_dev *pci_dev;
 	struct dma_pool *prp_page_pool;
@@ -90,15 +94,16 @@ struct nvme_dev {
 	struct miscdevice miscdev;
 	work_func_t reset_workfn;
 	struct work_struct reset_work;
-	struct work_struct cpu_work;
 	char name[12];
 	char serial[20];
 	char model[40];
 	char firmware_rev[8];
 	u32 max_hw_sectors;
 	u32 stripe_size;
+	u32 page_size;
 	u16 oncs;
 	u16 abort_limit;
+	u8 event_limit;
 	u8 vwc;
 	u8 initialized;
 };
@@ -132,7 +137,6 @@ struct nvme_iod {
 	int offset;		/* Of PRP list */
 	int nents;		/* Used in scatterlist */
 	int length;		/* Of data, in bytes */
-	unsigned long start_time;
 	dma_addr_t first_dma;
 	struct list_head node;
 	struct scatterlist sg[0];
@@ -150,12 +154,14 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
  */
 void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod);
 
-int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int , gfp_t);
+int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int, gfp_t);
 struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
 				unsigned long addr, unsigned length);
 void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
 			struct nvme_iod *iod);
-int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_command *, u32 *);
+int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_ns *,
+						struct nvme_command *, u32 *);
 int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns);
 int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *,
 							u32 *result);
 int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns,
@@ -181,6 +181,22 @@
 	NVME_LBART_ATTRIB_HIDE	= 1 << 1,
 };
 
+struct nvme_reservation_status {
+	__le32	gen;
+	__u8	rtype;
+	__u8	regctl[2];
+	__u8	resv5[2];
+	__u8	ptpls;
+	__u8	resv10[13];
+	struct {
+		__le16	cntlid;
+		__u8	rcsts;
+		__u8	resv3[5];
+		__le64	hostid;
+		__le64	rkey;
+	} regctl_ds[];
+};
+
 /* I/O commands */
 
 enum nvme_opcode {
@@ -189,7 +205,12 @@ enum nvme_opcode {
 	nvme_cmd_read		= 0x02,
 	nvme_cmd_write_uncor	= 0x04,
 	nvme_cmd_compare	= 0x05,
+	nvme_cmd_write_zeroes	= 0x08,
 	nvme_cmd_dsm		= 0x09,
+	nvme_cmd_resv_register	= 0x0d,
+	nvme_cmd_resv_report	= 0x0e,
+	nvme_cmd_resv_acquire	= 0x11,
+	nvme_cmd_resv_release	= 0x15,
 };
 
 struct nvme_common_command {
@@ -305,7 +326,11 @@ enum {
 	NVME_FEAT_IRQ_CONFIG	= 0x09,
 	NVME_FEAT_WRITE_ATOMIC	= 0x0a,
 	NVME_FEAT_ASYNC_EVENT	= 0x0b,
-	NVME_FEAT_SW_PROGRESS	= 0x0c,
+	NVME_FEAT_AUTO_PST	= 0x0c,
+	NVME_FEAT_SW_PROGRESS	= 0x80,
+	NVME_FEAT_HOST_ID	= 0x81,
+	NVME_FEAT_RESV_MASK	= 0x82,
+	NVME_FEAT_RESV_PERSIST	= 0x83,
 	NVME_LOG_ERROR		= 0x01,
 	NVME_LOG_SMART		= 0x02,
 	NVME_LOG_FW_SLOT	= 0x03,
@@ -440,9 +465,15 @@ enum {
 	NVME_SC_FUSED_MISSING		= 0xa,
 	NVME_SC_INVALID_NS		= 0xb,
 	NVME_SC_CMD_SEQ_ERROR		= 0xc,
+	NVME_SC_SGL_INVALID_LAST	= 0xd,
+	NVME_SC_SGL_INVALID_COUNT	= 0xe,
+	NVME_SC_SGL_INVALID_DATA	= 0xf,
+	NVME_SC_SGL_INVALID_METADATA	= 0x10,
+	NVME_SC_SGL_INVALID_TYPE	= 0x11,
 	NVME_SC_LBA_RANGE		= 0x80,
 	NVME_SC_CAP_EXCEEDED		= 0x81,
 	NVME_SC_NS_NOT_READY		= 0x82,
+	NVME_SC_RESERVATION_CONFLICT	= 0x83,
 	NVME_SC_CQ_INVALID		= 0x100,
 	NVME_SC_QID_INVALID		= 0x101,
 	NVME_SC_QUEUE_SIZE		= 0x102,
@@ -454,7 +485,15 @@ enum {
 	NVME_SC_INVALID_VECTOR		= 0x108,
 	NVME_SC_INVALID_LOG_PAGE	= 0x109,
 	NVME_SC_INVALID_FORMAT		= 0x10a,
+	NVME_SC_FIRMWARE_NEEDS_RESET	= 0x10b,
+	NVME_SC_INVALID_QUEUE		= 0x10c,
+	NVME_SC_FEATURE_NOT_SAVEABLE	= 0x10d,
+	NVME_SC_FEATURE_NOT_CHANGEABLE	= 0x10e,
+	NVME_SC_FEATURE_NOT_PER_NS	= 0x10f,
+	NVME_SC_FW_NEEDS_RESET_SUBSYS	= 0x110,
 	NVME_SC_BAD_ATTRIBUTES		= 0x180,
+	NVME_SC_INVALID_PI		= 0x181,
+	NVME_SC_READ_ONLY		= 0x182,
 	NVME_SC_WRITE_FAULT		= 0x280,
 	NVME_SC_READ_ERROR		= 0x281,
 	NVME_SC_GUARD_CHECK		= 0x282,
@@ -489,7 +528,7 @@ struct nvme_user_io {
 	__u16	appmask;
 };
 
-struct nvme_admin_cmd {
+struct nvme_passthru_cmd {
 	__u8	opcode;
 	__u8	flags;
 	__u16	rsvd1;
@@ -510,8 +549,11 @@ struct nvme_admin_cmd {
 	__u32	result;
 };
 
+#define nvme_admin_cmd nvme_passthru_cmd
+
 #define NVME_IOCTL_ID		_IO('N', 0x40)
 #define NVME_IOCTL_ADMIN_CMD	_IOWR('N', 0x41, struct nvme_admin_cmd)
 #define NVME_IOCTL_SUBMIT_IO	_IOW('N', 0x42, struct nvme_user_io)
+#define NVME_IOCTL_IO_CMD	_IOWR('N', 0x43, struct nvme_passthru_cmd)
 
 #endif /* _UAPI_LINUX_NVME_H */