mirror of https://gitee.com/openkylin/linux.git
Merge branch 'from-linus' into upstream
commit f587fb74b2

4	Makefile
@@ -1,8 +1,8 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 17
-EXTRAVERSION =-rc4
-NAME=Sliding Snow Leopard
+EXTRAVERSION =-rc5
+NAME=Lordi Rules
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"

@@ -92,7 +92,7 @@ void __restore_processor_state(struct saved_context *ctxt)
 write_cr4(ctxt->cr4);
 write_cr3(ctxt->cr3);
 write_cr2(ctxt->cr2);
-write_cr2(ctxt->cr0);
+write_cr0(ctxt->cr0);
 
 /*
  * now restore the descriptor tables to their proper values

@@ -338,6 +338,8 @@ SYSCALL(symlinkat)
 SYSCALL(readlinkat)
 SYSCALL(fchmodat)
 SYSCALL(faccessat)
+COMPAT_SYS(get_robust_list)
+COMPAT_SYS(set_robust_list)
 
 /*
  * please add new calls to arch/powerpc/platforms/cell/spu_callbacks.c

@@ -258,6 +258,7 @@ void *spu_syscall_table[] = {
 [__NR_futex] sys_futex,
 [__NR_sched_setaffinity] sys_sched_setaffinity,
 [__NR_sched_getaffinity] sys_sched_getaffinity,
+[224] sys_ni_syscall,
 [__NR_tuxcall] sys_ni_syscall,
 [226] sys_ni_syscall,
 [__NR_io_setup] sys_io_setup,

@@ -332,19 +333,21 @@ void *spu_syscall_table[] = {
 [__NR_readlinkat] sys_readlinkat,
 [__NR_fchmodat] sys_fchmodat,
 [__NR_faccessat] sys_faccessat,
+[__NR_get_robust_list] sys_get_robust_list,
+[__NR_set_robust_list] sys_set_robust_list,
 };
 
 long spu_sys_callback(struct spu_syscall_block *s)
 {
 long (*syscall)(u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6);
 
-syscall = spu_syscall_table[s->nr_ret];
-
 if (s->nr_ret >= ARRAY_SIZE(spu_syscall_table)) {
 pr_debug("%s: invalid syscall #%ld", __FUNCTION__, s->nr_ret);
 return -ENOSYS;
 }
 
+syscall = spu_syscall_table[s->nr_ret];
+
 #ifdef DEBUG
 print_symbol(KERN_DEBUG "SPU-syscall %s:", (unsigned long)syscall);
 printk("syscall%ld(%lx, %lx, %lx, %lx, %lx, %lx)\n",

@@ -272,7 +272,7 @@ static inline void stop_hz_timer(void)
 next = next_timer_interrupt();
 do {
 seq = read_seqbegin_irqsave(&xtime_lock, flags);
-timer = (__u64 next) - (__u64 jiffies) + jiffies_64;
+timer = ((__u64) next) - ((__u64) jiffies) + jiffies_64;
 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
 todval = -1ULL;
 /* Be careful about overflows. */

@@ -218,7 +218,7 @@ static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx)
 * DMA for PCI device PDEV. Return non-NULL cpu-side address if
 * successful and set *DMA_ADDRP to the PCI side dma address.
 */
-static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
+static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
 {
 struct pcidev_cookie *pcp;
 struct pci_iommu *iommu;

@@ -232,7 +232,7 @@ static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
 if (order >= 10)
 return NULL;
 
-first_page = __get_free_pages(GFP_ATOMIC, order);
+first_page = __get_free_pages(gfp, order);
 if (first_page == 0UL)
 return NULL;
 memset((char *)first_page, 0, PAGE_SIZE << order);

@@ -154,7 +154,7 @@ static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, un
 __clear_bit(i, arena->map);
 }
 
-static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
+static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
 {
 struct pcidev_cookie *pcp;
 struct pci_iommu *iommu;

@@ -169,7 +169,7 @@ static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
 
 npages = size >> IO_PAGE_SHIFT;
 
-first_page = __get_free_pages(GFP_ATOMIC, order);
+first_page = __get_free_pages(gfp, order);
 if (unlikely(first_page == 0UL))
 return NULL;
 

@@ -3452,7 +3452,12 @@ void end_that_request_last(struct request *req, int uptodate)
 if (unlikely(laptop_mode) && blk_fs_request(req))
 laptop_io_completion();
 
-if (disk && blk_fs_request(req)) {
+/*
+ * Account IO completion. bar_rq isn't accounted as a normal
+ * IO on queueing nor completion. Accounting the containing
+ * request is enough.
+ */
+if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
 unsigned long duration = jiffies - req->start_time;
 const int rw = rq_data_dir(req);
 

@@ -398,7 +398,7 @@ int tty_insert_flip_string_flags(struct tty_struct *tty,
 while (unlikely(size > copied));
 return copied;
 }
-EXPORT_SYMBOL_GPL(tty_insert_flip_string_flags);
+EXPORT_SYMBOL(tty_insert_flip_string_flags);
 
 void tty_schedule_flip(struct tty_struct *tty)
 {

@@ -1905,19 +1905,19 @@ static void __exit infinipath_cleanup(void)
 } else
 ipath_dbg("irq is 0, not doing free_irq "
 "for unit %u\n", dd->ipath_unit);
 
+/*
+ * we check for NULL here, because it's outside
+ * the kregbase check, and we need to call it
+ * after the free_irq. Thus it's possible that
+ * the function pointers were never initialized.
+ */
+if (dd->ipath_f_cleanup)
+/* clean up chip-specific stuff */
+dd->ipath_f_cleanup(dd);
+
 dd->pcidev = NULL;
 }
 
-/*
- * we check for NULL here, because it's outside the kregbase
- * check, and we need to call it after the free_irq. Thus
- * it's possible that the function pointers were never
- * initialized.
- */
-if (dd->ipath_f_cleanup)
-/* clean up chip-specific stuff */
-dd->ipath_f_cleanup(dd);
-
 spin_lock_irqsave(&ipath_devs_lock, flags);
 }
 

@@ -505,11 +505,10 @@ static u8 flash_csum(struct ipath_flash *ifp, int adjust)
 * ipath_get_guid - get the GUID from the i2c device
 * @dd: the infinipath device
 *
-* When we add the multi-chip support, we will probably have to add
-* the ability to use the number of guids field, and get the guid from
-* the first chip's flash, to use for all of them.
+* We have the capability to use the ipath_nguid field, and get
+* the guid from the first chip's flash, to use for all of them.
 */
-void ipath_get_guid(struct ipath_devdata *dd)
+void ipath_get_eeprom_info(struct ipath_devdata *dd)
 {
 void *buf;
 struct ipath_flash *ifp;

@@ -139,7 +139,7 @@ static int ipath_get_base_info(struct ipath_portdata *pd,
 kinfo->spi_piosize = dd->ipath_ibmaxlen;
 kinfo->spi_mtu = dd->ipath_ibmaxlen; /* maxlen, not ibmtu */
 kinfo->spi_port = pd->port_port;
-kinfo->spi_sw_version = IPATH_USER_SWVERSION;
+kinfo->spi_sw_version = IPATH_KERN_SWVERSION;
 kinfo->spi_hw_version = dd->ipath_revision;
 
 if (copy_to_user(ubase, kinfo, sizeof(*kinfo)))

@@ -1224,6 +1224,10 @@ static unsigned int ipath_poll(struct file *fp,
 
 if (tail == head) {
 set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
+if(dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */
+(void)ipath_write_ureg(dd, ur_rcvhdrhead,
+dd->ipath_rhdrhead_intr_off
+| head, pd->port_port);
 poll_wait(fp, &pd->port_wait, pt);
 
 if (test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) {

@@ -607,7 +607,12 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
 case 4: /* Ponderosa is one of the bringup boards */
 n = "Ponderosa";
 break;
-case 5: /* HT-460 original production board */
+case 5:
+/*
+ * HT-460 original production board; two production levels, with
+ * different serial number ranges. See ipath_ht_early_init() for
+ * case where we enable IPATH_GPIO_INTR for later serial # range.
+ */
 n = "InfiniPath_HT-460";
 break;
 case 6:

@@ -642,7 +647,7 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
 if (n)
 snprintf(name, namelen, "%s", n);
 
-if (dd->ipath_majrev != 3 || dd->ipath_minrev != 2) {
+if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 || dd->ipath_minrev > 3)) {
 /*
 * This version of the driver only supports the HT-400
 * Rev 3.2

@@ -1520,6 +1525,18 @@ static int ipath_ht_early_init(struct ipath_devdata *dd)
 */
 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
 INFINIPATH_S_ABORT);
+
+ipath_get_eeprom_info(dd);
+if(dd->ipath_boardrev == 5 && dd->ipath_serial[0] == '1' &&
+dd->ipath_serial[1] == '2' && dd->ipath_serial[2] == '8') {
+/*
+ * Later production HT-460 has same changes as HT-465, so
+ * can use GPIO interrupts. They have serial #'s starting
+ * with 128, rather than 112.
+ */
+dd->ipath_flags |= IPATH_GPIO_INTR;
+dd->ipath_flags &= ~IPATH_POLL_RX_INTR;
+}
 return 0;
 }
 

@@ -879,7 +879,6 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
 
 done:
 if (!ret) {
-ipath_get_guid(dd);
 *dd->ipath_statusp |= IPATH_STATUS_CHIP_PRESENT;
 if (!dd->ipath_f_intrsetup(dd)) {
 /* now we can enable all interrupts from the chip */

@@ -650,7 +650,7 @@ u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32 *);
 void ipath_init_pe800_funcs(struct ipath_devdata *);
 /* init HT-400-specific func */
 void ipath_init_ht400_funcs(struct ipath_devdata *);
-void ipath_get_guid(struct ipath_devdata *);
+void ipath_get_eeprom_info(struct ipath_devdata *);
 u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
 
 /*

@@ -136,9 +136,7 @@ int ipath_lkey_ok(struct ipath_lkey_table *rkt, struct ipath_sge *isge,
 ret = 1;
 goto bail;
 }
-spin_lock(&rkt->lock);
 mr = rkt->table[(sge->lkey >> (32 - ib_ipath_lkey_table_size))];
-spin_unlock(&rkt->lock);
 if (unlikely(mr == NULL || mr->lkey != sge->lkey)) {
 ret = 0;
 goto bail;

@@ -184,8 +182,6 @@ int ipath_lkey_ok(struct ipath_lkey_table *rkt, struct ipath_sge *isge,
 * @acc: access flags
 *
 * Return 1 if successful, otherwise 0.
-*
-* The QP r_rq.lock should be held.
 */
 int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
 u32 len, u64 vaddr, u32 rkey, int acc)

@@ -196,9 +192,7 @@ int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
 size_t off;
 int ret;
 
-spin_lock(&rkt->lock);
 mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))];
-spin_unlock(&rkt->lock);
 if (unlikely(mr == NULL || mr->lkey != rkey)) {
 ret = 0;
 goto bail;

@@ -872,12 +872,13 @@ static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
 update_sge(ss, len);
 length -= len;
 }
+/* Update address before sending packet. */
+update_sge(ss, length);
 /* must flush early everything before trigger word */
 ipath_flush_wc();
 __raw_writel(last, piobuf);
 /* be sure trigger word is written */
 ipath_flush_wc();
-update_sge(ss, length);
 }
 
 /**

@@ -943,17 +944,18 @@ int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
 if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
 !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
 u32 w;
+u32 *addr = (u32 *) ss->sge.vaddr;
 
+/* Update address before sending packet. */
+update_sge(ss, len);
 /* Need to round up for the last dword in the packet. */
 w = (len + 3) >> 2;
-__iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
+__iowrite32_copy(piobuf, addr, w - 1);
 /* must flush early everything before trigger word */
 ipath_flush_wc();
-__raw_writel(((u32 *) ss->sge.vaddr)[w - 1],
-piobuf + w - 1);
+__raw_writel(addr[w - 1], piobuf + w - 1);
 /* be sure trigger word is written */
 ipath_flush_wc();
-update_sge(ss, len);
 ret = 0;
 goto bail;
 }

@@ -1180,6 +1180,8 @@ static int ipath_pe_early_init(struct ipath_devdata *dd)
 */
 dd->ipath_rhdrhead_intr_off = 1ULL<<32;
 
+ipath_get_eeprom_info(dd);
+
 return 0;
 }
 

@@ -375,10 +375,10 @@ static void ipath_error_qp(struct ipath_qp *qp)
 
 spin_lock(&dev->pending_lock);
 /* XXX What if its already removed by the timeout code? */
-if (qp->timerwait.next != LIST_POISON1)
-list_del(&qp->timerwait);
-if (qp->piowait.next != LIST_POISON1)
-list_del(&qp->piowait);
+if (!list_empty(&qp->timerwait))
+list_del_init(&qp->timerwait);
+if (!list_empty(&qp->piowait))
+list_del_init(&qp->piowait);
 spin_unlock(&dev->pending_lock);
 
 wc.status = IB_WC_WR_FLUSH_ERR;

@@ -427,6 +427,7 @@ static void ipath_error_qp(struct ipath_qp *qp)
 int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 int attr_mask)
 {
+struct ipath_ibdev *dev = to_idev(ibqp->device);
 struct ipath_qp *qp = to_iqp(ibqp);
 enum ib_qp_state cur_state, new_state;
 unsigned long flags;

@@ -443,6 +444,19 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 attr_mask))
 goto inval;
 
+if (attr_mask & IB_QP_AV)
+if (attr->ah_attr.dlid == 0 ||
+attr->ah_attr.dlid >= IPS_MULTICAST_LID_BASE)
+goto inval;
+
+if (attr_mask & IB_QP_PKEY_INDEX)
+if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd))
+goto inval;
+
+if (attr_mask & IB_QP_MIN_RNR_TIMER)
+if (attr->min_rnr_timer > 31)
+goto inval;
+
 switch (new_state) {
 case IB_QPS_RESET:
 ipath_reset_qp(qp);

@@ -457,13 +471,8 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
 }
 
-if (attr_mask & IB_QP_PKEY_INDEX) {
-struct ipath_ibdev *dev = to_idev(ibqp->device);
-
-if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd))
-goto inval;
+if (attr_mask & IB_QP_PKEY_INDEX)
 qp->s_pkey_index = attr->pkey_index;
-}
 
 if (attr_mask & IB_QP_DEST_QPN)
 qp->remote_qpn = attr->dest_qp_num;

@@ -479,12 +488,8 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 if (attr_mask & IB_QP_ACCESS_FLAGS)
 qp->qp_access_flags = attr->qp_access_flags;
 
-if (attr_mask & IB_QP_AV) {
-if (attr->ah_attr.dlid == 0 ||
-attr->ah_attr.dlid >= IPS_MULTICAST_LID_BASE)
-goto inval;
+if (attr_mask & IB_QP_AV)
 qp->remote_ah_attr = attr->ah_attr;
-}
 
 if (attr_mask & IB_QP_PATH_MTU)
 qp->path_mtu = attr->path_mtu;

@@ -499,11 +504,8 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 qp->s_rnr_retry_cnt = qp->s_rnr_retry;
 }
 
-if (attr_mask & IB_QP_MIN_RNR_TIMER) {
-if (attr->min_rnr_timer > 31)
-goto inval;
+if (attr_mask & IB_QP_MIN_RNR_TIMER)
 qp->s_min_rnr_timer = attr->min_rnr_timer;
-}
 
 if (attr_mask & IB_QP_QKEY)
 qp->qkey = attr->qkey;

@@ -710,10 +712,8 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 init_attr->qp_type == IB_QPT_RC ?
 ipath_do_rc_send : ipath_do_uc_send,
 (unsigned long)qp);
-qp->piowait.next = LIST_POISON1;
-qp->piowait.prev = LIST_POISON2;
-qp->timerwait.next = LIST_POISON1;
-qp->timerwait.prev = LIST_POISON2;
+INIT_LIST_HEAD(&qp->piowait);
+INIT_LIST_HEAD(&qp->timerwait);
 qp->state = IB_QPS_RESET;
 qp->s_wq = swq;
 qp->s_size = init_attr->cap.max_send_wr + 1;

@@ -734,7 +734,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 ipath_reset_qp(qp);
 
 /* Tell the core driver that the kernel SMA is present. */
-if (qp->ibqp.qp_type == IB_QPT_SMI)
+if (init_attr->qp_type == IB_QPT_SMI)
 ipath_layer_set_verbs_flags(dev->dd,
 IPATH_VERBS_KERNEL_SMA);
 break;

@@ -783,10 +783,10 @@ int ipath_destroy_qp(struct ib_qp *ibqp)
 
 /* Make sure the QP isn't on the timeout list. */
 spin_lock_irqsave(&dev->pending_lock, flags);
-if (qp->timerwait.next != LIST_POISON1)
-list_del(&qp->timerwait);
-if (qp->piowait.next != LIST_POISON1)
-list_del(&qp->piowait);
+if (!list_empty(&qp->timerwait))
+list_del_init(&qp->timerwait);
+if (!list_empty(&qp->piowait))
+list_del_init(&qp->piowait);
 spin_unlock_irqrestore(&dev->pending_lock, flags);
 
 /*

@@ -855,10 +855,10 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
 
 spin_lock(&dev->pending_lock);
 /* XXX What if its already removed by the timeout code? */
-if (qp->timerwait.next != LIST_POISON1)
-list_del(&qp->timerwait);
-if (qp->piowait.next != LIST_POISON1)
-list_del(&qp->piowait);
+if (!list_empty(&qp->timerwait))
+list_del_init(&qp->timerwait);
+if (!list_empty(&qp->piowait))
+list_del_init(&qp->piowait);
 spin_unlock(&dev->pending_lock);
 
 ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);

@@ -57,7 +57,7 @@ static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
 qp->s_len = wqe->length - len;
 dev = to_idev(qp->ibqp.device);
 spin_lock(&dev->pending_lock);
-if (qp->timerwait.next == LIST_POISON1)
+if (list_empty(&qp->timerwait))
 list_add_tail(&qp->timerwait,
 &dev->pending[dev->pending_index]);
 spin_unlock(&dev->pending_lock);

@@ -356,7 +356,7 @@ static inline int ipath_make_rc_req(struct ipath_qp *qp,
 if ((int)(qp->s_psn - qp->s_next_psn) > 0)
 qp->s_next_psn = qp->s_psn;
 spin_lock(&dev->pending_lock);
-if (qp->timerwait.next == LIST_POISON1)
+if (list_empty(&qp->timerwait))
 list_add_tail(&qp->timerwait,
 &dev->pending[dev->pending_index]);
 spin_unlock(&dev->pending_lock);

@@ -726,8 +726,8 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
 */
 dev = to_idev(qp->ibqp.device);
 spin_lock(&dev->pending_lock);
-if (qp->timerwait.next != LIST_POISON1)
-list_del(&qp->timerwait);
+if (!list_empty(&qp->timerwait))
+list_del_init(&qp->timerwait);
 spin_unlock(&dev->pending_lock);
 
 if (wqe->wr.opcode == IB_WR_RDMA_READ)

@@ -886,8 +886,8 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
 * just won't find anything to restart if we ACK everything.
 */
 spin_lock(&dev->pending_lock);
-if (qp->timerwait.next != LIST_POISON1)
-list_del(&qp->timerwait);
+if (!list_empty(&qp->timerwait))
+list_del_init(&qp->timerwait);
 spin_unlock(&dev->pending_lock);
 
 /*

@@ -1194,8 +1194,7 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
 IB_WR_RDMA_READ))
 goto ack_done;
 spin_lock(&dev->pending_lock);
-if (qp->s_rnr_timeout == 0 &&
-qp->timerwait.next != LIST_POISON1)
+if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
 list_move_tail(&qp->timerwait,
 &dev->pending[dev->pending_index]);
 spin_unlock(&dev->pending_lock);

@@ -435,7 +435,7 @@ void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
 unsigned long flags;
 
 spin_lock_irqsave(&dev->pending_lock, flags);
-if (qp->piowait.next == LIST_POISON1)
+if (list_empty(&qp->piowait))
 list_add_tail(&qp->piowait, &dev->piowait);
 spin_unlock_irqrestore(&dev->pending_lock, flags);
 /*

@@ -464,7 +464,7 @@ static void ipath_ib_timer(void *arg)
 last = &dev->pending[dev->pending_index];
 while (!list_empty(last)) {
 qp = list_entry(last->next, struct ipath_qp, timerwait);
-list_del(&qp->timerwait);
+list_del_init(&qp->timerwait);
 qp->timer_next = resend;
 resend = qp;
 atomic_inc(&qp->refcount);

@@ -474,7 +474,7 @@ static void ipath_ib_timer(void *arg)
 qp = list_entry(last->next, struct ipath_qp, timerwait);
 if (--qp->s_rnr_timeout == 0) {
 do {
-list_del(&qp->timerwait);
+list_del_init(&qp->timerwait);
 tasklet_hi_schedule(&qp->s_task);
 if (list_empty(last))
 break;

@@ -554,7 +554,7 @@ static int ipath_ib_piobufavail(void *arg)
 while (!list_empty(&dev->piowait)) {
 qp = list_entry(dev->piowait.next, struct ipath_qp,
 piowait);
-list_del(&qp->piowait);
+list_del_init(&qp->piowait);
 tasklet_hi_schedule(&qp->s_task);
 }
 spin_unlock_irqrestore(&dev->pending_lock, flags);

@@ -951,6 +951,7 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd)
 idev->dd = dd;
 
 strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX);
 dev->owner = THIS_MODULE;
+dev->node_guid = ipath_layer_get_guid(dd);
 dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION;
 dev->uverbs_cmd_mask =

@@ -331,13 +331,14 @@ static int raid0_run (mddev_t *mddev)
 goto out_free_conf;
 size = conf->strip_zone[cur].size;
 
-for (i=0; i< nb_zone; i++) {
-conf->hash_table[i] = conf->strip_zone + cur;
+conf->hash_table[0] = conf->strip_zone + cur;
+for (i=1; i< nb_zone; i++) {
 while (size <= conf->hash_spacing) {
 cur++;
 size += conf->strip_zone[cur].size;
 }
 size -= conf->hash_spacing;
+conf->hash_table[i] = conf->strip_zone + cur;
 }
 if (conf->preshift) {
 conf->hash_spacing >>= conf->preshift;

@@ -1,6 +1,6 @@
 config VIDEO_SAA7146
 tristate
-select I2C
+depends on I2C
 
 config VIDEO_SAA7146_VV
 tristate

@@ -22,26 +22,26 @@ config DVB
 source "drivers/media/dvb/dvb-core/Kconfig"
 
 comment "Supported SAA7146 based PCI Adapters"
-depends on DVB_CORE && PCI
+depends on DVB_CORE && PCI && I2C
 source "drivers/media/dvb/ttpci/Kconfig"
 
 comment "Supported USB Adapters"
-depends on DVB_CORE && USB
+depends on DVB_CORE && USB && I2C
 source "drivers/media/dvb/dvb-usb/Kconfig"
 source "drivers/media/dvb/ttusb-budget/Kconfig"
 source "drivers/media/dvb/ttusb-dec/Kconfig"
 source "drivers/media/dvb/cinergyT2/Kconfig"
 
 comment "Supported FlexCopII (B2C2) Adapters"
-depends on DVB_CORE && (PCI || USB)
+depends on DVB_CORE && (PCI || USB) && I2C
 source "drivers/media/dvb/b2c2/Kconfig"
 
 comment "Supported BT878 Adapters"
-depends on DVB_CORE && PCI
+depends on DVB_CORE && PCI && I2C
 source "drivers/media/dvb/bt8xx/Kconfig"
 
 comment "Supported Pluto2 Adapters"
-depends on DVB_CORE && PCI
+depends on DVB_CORE && PCI && I2C
 source "drivers/media/dvb/pluto2/Kconfig"
 
 comment "Supported DVB Frontends"

@@ -1,6 +1,6 @@
 config DVB_B2C2_FLEXCOP
 tristate "Technisat/B2C2 FlexCopII(b) and FlexCopIII adapters"
-depends on DVB_CORE
+depends on DVB_CORE && I2C
 select DVB_STV0299
 select DVB_MT352
 select DVB_MT312

@@ -16,7 +16,7 @@ config DVB_B2C2_FLEXCOP
 
 config DVB_B2C2_FLEXCOP_PCI
 tristate "Technisat/B2C2 Air/Sky/Cable2PC PCI"
-depends on DVB_B2C2_FLEXCOP && PCI
+depends on DVB_B2C2_FLEXCOP && PCI && I2C
 help
 Support for the Air/Sky/CableStar2 PCI card (DVB/ATSC) by Technisat/B2C2.
 

@@ -24,7 +24,7 @@ config DVB_B2C2_FLEXCOP_PCI
 
 config DVB_B2C2_FLEXCOP_USB
 tristate "Technisat/B2C2 Air/Sky/Cable2PC USB"
-depends on DVB_B2C2_FLEXCOP && USB
+depends on DVB_B2C2_FLEXCOP && USB && I2C
 help
 Support for the Air/Sky/Cable2PC USB1.1 box (DVB/ATSC) by Technisat/B2C2,
 

@@ -1,6 +1,6 @@
 config DVB_BT8XX
 tristate "BT8xx based PCI cards"
-depends on DVB_CORE && PCI && VIDEO_BT848
+depends on DVB_CORE && PCI && I2C && VIDEO_BT848
 select DVB_MT352
 select DVB_SP887X
 select DVB_NXT6000

@@ -115,7 +115,7 @@ static int is_pci_slot_eq(struct pci_dev* adev, struct pci_dev* bdev)
 return 0;
 }
 
-static struct bt878 __init *dvb_bt8xx_878_match(unsigned int bttv_nr, struct pci_dev* bttv_pci_dev)
+static struct bt878 __devinit *dvb_bt8xx_878_match(unsigned int bttv_nr, struct pci_dev* bttv_pci_dev)
 {
 unsigned int card_nr;
 

@@ -709,7 +709,7 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
 }
 }
 
-static int __init dvb_bt8xx_load_card(struct dvb_bt8xx_card *card, u32 type)
+static int __devinit dvb_bt8xx_load_card(struct dvb_bt8xx_card *card, u32 type)
 {
 int result;
 

@@ -794,7 +794,7 @@ static int __init dvb_bt8xx_load_card(struct dvb_bt8xx_card *card, u32 type)
 return 0;
 }
 
-static int dvb_bt8xx_probe(struct bttv_sub_device *sub)
+static int __devinit dvb_bt8xx_probe(struct bttv_sub_device *sub)
 {
 struct dvb_bt8xx_card *card;
 struct pci_dev* bttv_pci_dev;

@@ -1,6 +1,6 @@
 config DVB_USB
 tristate "Support for various USB DVB devices"
-depends on DVB_CORE && USB
+depends on DVB_CORE && USB && I2C
 select FW_LOADER
 help
 By enabling this you will be able to choose the various supported

@@ -1,7 +1,6 @@
 config DVB_PLUTO2
 tristate "Pluto2 cards"
-depends on DVB_CORE && PCI
-select I2C
+depends on DVB_CORE && PCI && I2C
 select I2C_ALGOBIT
 select DVB_TDA1004X
 help

@@ -1,6 +1,6 @@
 config DVB_AV7110
 tristate "AV7110 cards"
-depends on DVB_CORE && PCI && VIDEO_V4L1
+depends on DVB_CORE && PCI && I2C && VIDEO_V4L1
 select FW_LOADER
 select VIDEO_SAA7146_VV
 select DVB_VES1820

@@ -58,7 +58,7 @@ config DVB_AV7110_OSD
 
 config DVB_BUDGET
 tristate "Budget cards"
-depends on DVB_CORE && PCI && VIDEO_V4L1
+depends on DVB_CORE && PCI && I2C && VIDEO_V4L1
 select VIDEO_SAA7146
 select DVB_STV0299
 select DVB_VES1X93

@@ -79,7 +79,7 @@ config DVB_BUDGET
 
 config DVB_BUDGET_CI
 tristate "Budget cards with onboard CI connector"
-depends on DVB_CORE && PCI && VIDEO_V4L1
+depends on DVB_CORE && PCI && I2C && VIDEO_V4L1
 select VIDEO_SAA7146
 select DVB_STV0297
 select DVB_STV0299

@@ -99,7 +99,7 @@ config DVB_BUDGET_CI
 
 config DVB_BUDGET_AV
 tristate "Budget cards with analog video inputs"
-depends on DVB_CORE && PCI && VIDEO_V4L1
+depends on DVB_CORE && PCI && I2C && VIDEO_V4L1
 select VIDEO_SAA7146_VV
 select DVB_STV0299
 select DVB_TDA1004X

@@ -170,7 +170,7 @@ config VIDEO_VINO
 
 config VIDEO_STRADIS
 tristate "Stradis 4:2:2 MPEG-2 video driver (EXPERIMENTAL)"
-depends on EXPERIMENTAL && PCI && VIDEO_V4L1
+depends on EXPERIMENTAL && PCI && VIDEO_V4L1 && !PPC64
 help
 Say Y here to enable support for the Stradis 4:2:2 MPEG-2 video
 driver for PCI. There is a product page at

@@ -178,7 +178,7 @@ config VIDEO_STRADIS
 
 config VIDEO_ZORAN
 tristate "Zoran ZR36057/36067 Video For Linux"
-depends on PCI && I2C_ALGOBIT && VIDEO_V4L1
+depends on PCI && I2C_ALGOBIT && VIDEO_V4L1 && !PPC64
 help
 Say Y for support for MJPEG capture cards based on the Zoran
 36057/36067 PCI controller chipset. This includes the Iomega

@@ -11,7 +11,10 @@ tuner-objs := tuner-core.o tuner-types.o tuner-simple.o \
 msp3400-objs := msp3400-driver.o msp3400-kthreads.o
 
 obj-$(CONFIG_VIDEO_DEV) += videodev.o v4l2-common.o compat_ioctl32.o
-obj-$(CONFIG_VIDEO_V4L1_COMPAT) += v4l1-compat.o
+
+ifeq ($(CONFIG_VIDEO_V4L1_COMPAT),y)
+obj-$(CONFIG_VIDEO_DEV) += v4l1-compat.o
+endif
 
 obj-$(CONFIG_VIDEO_BT848) += bt8xx/
 obj-$(CONFIG_VIDEO_BT848) += tvaudio.o tda7432.o tda9875.o ir-kbd-i2c.o

@@ -353,7 +353,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
 */
 printk(KERN_ERR "%s: unable to select block size for "
 "writing (rb%u wb%u rp%u wp%u)\n",
-md->disk->disk_name,
+mmc_card_id(card),
 1 << card->csd.read_blkbits,
 1 << card->csd.write_blkbits,
 card->csd.read_partial,

@@ -55,8 +55,8 @@
 
 #define DRV_MODULE_NAME "bnx2"
 #define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.4.39"
-#define DRV_MODULE_RELDATE "March 22, 2006"
+#define DRV_MODULE_VERSION "1.4.40"
+#define DRV_MODULE_RELDATE "May 22, 2006"
 
 #define RUN_AT(x) (jiffies + (x))
 

@@ -2945,7 +2945,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
 int buf_size)
 {
 u32 written, offset32, len32;
-u8 *buf, start[4], end[4];
+u8 *buf, start[4], end[4], *flash_buffer = NULL;
 int rc = 0;
 int align_start, align_end;
 

@@ -2985,12 +2985,19 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
 memcpy(buf + align_start, data_buf, buf_size);
 }
 
+if (bp->flash_info->buffered == 0) {
+flash_buffer = kmalloc(264, GFP_KERNEL);
+if (flash_buffer == NULL) {
+rc = -ENOMEM;
+goto nvram_write_end;
+}
+}
+
 written = 0;
 while ((written < len32) && (rc == 0)) {
 u32 page_start, page_end, data_start, data_end;
 u32 addr, cmd_flags;
 int i;
-u8 flash_buffer[264];
 
 /* Find the page_start addr */
 page_start = offset32 + written;

@@ -3061,7 +3068,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
 }
 
 /* Loop to write the new data from data_start to data_end */
-for (addr = data_start; addr < data_end; addr += 4, i++) {
+for (addr = data_start; addr < data_end; addr += 4, i += 4) {
 if ((addr == page_end - 4) ||
 ((bp->flash_info->buffered) &&
 (addr == data_end - 4))) {

@@ -3109,6 +3116,9 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
 }
 
 nvram_write_end:
+if (bp->flash_info->buffered == 0)
+kfree(flash_buffer);
+
 if (align_start || align_end)
 kfree(buf);
 return rc;

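The bnx2 hunks above replace a 264-byte scratch array that lived on the stack inside the write loop with a single heap allocation made before the loop and released at the `nvram_write_end` label. As a minimal, hedged userspace sketch of that allocate-once/free-at-one-exit shape (names and sizes here are illustrative, not the bnx2 code):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Sketch: allocate one scratch buffer, reuse it across iterations,
 * and free it at a single exit point -- the shape of the bnx2 change. */
static int write_pages(const unsigned char *data, size_t len, int buffered)
{
	unsigned char *scratch = NULL;
	int rc = 0;
	size_t off;

	if (!buffered) {
		scratch = malloc(264);   /* was a per-iteration stack array */
		if (scratch == NULL) {
			rc = -1;         /* -ENOMEM in the kernel code */
			goto write_end;
		}
	}

	for (off = 0; off < len && rc == 0; off += 264) {
		size_t chunk = len - off < 264 ? len - off : 264;
		if (scratch)
			memcpy(scratch, data + off, chunk); /* stand-in for the page read-back */
		printf("wrote %zu bytes at offset %zu\n", chunk, off);
	}

write_end:
	free(scratch);   /* free(NULL) is a no-op, mirroring kfree() */
	return rc;
}

int main(void)
{
	unsigned char buf[600] = { 0 };
	return write_pages(buf, sizeof(buf), 0) ? 1 : 0;
}
```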
@@ -979,6 +979,7 @@ static int sky2_rx_start(struct sky2_port *sky2)
 struct sky2_hw *hw = sky2->hw;
 unsigned rxq = rxqaddr[sky2->port];
 int i;
+unsigned thresh;
 
 sky2->rx_put = sky2->rx_next = 0;
 sky2_qset(hw, rxq);

@@ -1003,9 +1004,21 @@ static int sky2_rx_start(struct sky2_port *sky2)
 sky2_rx_add(sky2, re->mapaddr);
 }
 
-/* Truncate oversize frames */
-sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), sky2->rx_bufsize - 8);
-sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON);
+/*
+ * The receiver hangs if it receives frames larger than the
+ * packet buffer. As a workaround, truncate oversize frames, but
+ * the register is limited to 9 bits, so if you do frames > 2052
+ * you better get the MTU right!
+ */
+thresh = (sky2->rx_bufsize - 8) / sizeof(u32);
+if (thresh > 0x1ff)
+sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF);
+else {
+sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), thresh);
+sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON);
+}
+
 
 /* Tell chip about available buffers */
 sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put);

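The sky2 hunk above sizes the truncation threshold in 32-bit words and falls back to disabling truncation when the value would not fit the 9-bit register. A small hedged sketch of that arithmetic (the 0x1ff limit mirrors the diff; the register writes are only printed here):

```c
#include <stdio.h>

#define REG_LIMIT 0x1ff   /* 9-bit threshold field, as noted in the diff */

/* Decide how a receive buffer size maps onto the truncation register. */
static void pick_truncation(unsigned rx_bufsize)
{
	unsigned thresh = (rx_bufsize - 8) / sizeof(unsigned int);

	if (thresh > REG_LIMIT)
		printf("bufsize %u: threshold %u too large, truncation off\n",
		       rx_bufsize, thresh);
	else
		printf("bufsize %u: program threshold %u, truncation on\n",
		       rx_bufsize, thresh);
}

int main(void)
{
	pick_truncation(1536);   /* typical MTU-sized buffer */
	pick_truncation(9000);   /* jumbo frames exceed the 9-bit field */
	return 0;
}
```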
@@ -69,8 +69,8 @@
 
 #define DRV_MODULE_NAME "tg3"
 #define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.57"
-#define DRV_MODULE_RELDATE "Apr 28, 2006"
+#define DRV_MODULE_VERSION "3.58"
+#define DRV_MODULE_RELDATE "May 22, 2006"
 
 #define TG3_DEF_MAC_MODE 0
 #define TG3_DEF_RX_MODE 0

@@ -6488,6 +6488,10 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
+
+TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
+TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
+TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
 }
 
 static void tg3_timer(unsigned long __opaque)

@@ -3643,6 +3643,8 @@ static void ata_pio_block(struct ata_port *ap)
 
 ata_pio_sector(qc);
 }
+
+ata_altstatus(ap); /* flush */
 }
 
 static void ata_pio_error(struct ata_port *ap)

@@ -3759,11 +3761,14 @@ static void atapi_packet_task(void *_data)
 spin_lock_irqsave(&ap->host_set->lock, flags);
 ap->flags &= ~ATA_FLAG_NOINTR;
 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
+ata_altstatus(ap); /* flush */
 
 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
 ap->ops->bmdma_start(qc); /* initiate bmdma */
 spin_unlock_irqrestore(&ap->host_set->lock, flags);
 } else {
 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
+ata_altstatus(ap); /* flush */
+
 /* PIO commands are handled by polling */
 ap->hsm_task_state = HSM_ST;

3	fs/bio.c

@@ -1116,6 +1116,9 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
 bp->bio1.bi_io_vec = &bp->bv1;
 bp->bio2.bi_io_vec = &bp->bv2;
 
+bp->bio1.bi_max_vecs = 1;
+bp->bio2.bi_max_vecs = 1;
+
 bp->bio1.bi_end_io = bio_pair_end_1;
 bp->bio2.bi_end_io = bio_pair_end_2;
 

@@ -542,7 +542,7 @@ static int metapage_readpage(struct file *fp, struct page *page)
 static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
 {
 struct metapage *mp;
-int busy = 0;
+int ret = 1;
 unsigned int offset;
 
 for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {

@@ -552,30 +552,20 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
 continue;
 
 jfs_info("metapage_releasepage: mp = 0x%p", mp);
-if (mp->count || mp->nohomeok) {
+if (mp->count || mp->nohomeok ||
+test_bit(META_dirty, &mp->flag)) {
 jfs_info("count = %ld, nohomeok = %d", mp->count,
 mp->nohomeok);
-busy = 1;
+ret = 0;
 continue;
 }
-wait_on_page_writeback(page);
-//WARN_ON(test_bit(META_dirty, &mp->flag));
-if (test_bit(META_dirty, &mp->flag)) {
-dump_mem("dirty mp in metapage_releasepage", mp,
-sizeof(struct metapage));
-dump_mem("page", page, sizeof(struct page));
-dump_stack();
-}
 if (mp->lsn)
 remove_from_logsync(mp);
 remove_metapage(page, mp);
 INCREMENT(mpStat.pagefree);
 free_metapage(mp);
 }
-if (busy)
-return -1;
-
-return 0;
+return ret;
 }
 
 static void metapage_invalidatepage(struct page *page, unsigned long offset)

@@ -1066,9 +1066,11 @@ exp_pseudoroot(struct auth_domain *clp, struct svc_fh *fhp,
 rv = nfserr_perm;
 else if (IS_ERR(exp))
 rv = nfserrno(PTR_ERR(exp));
-else
+else {
 rv = fh_compose(fhp, exp,
 fsid_key->ek_dentry, NULL);
+exp_put(exp);
+}
 cache_put(&fsid_key->h, &svc_expkey_cache);
 return rv;
 }

@@ -321,8 +321,10 @@
 #define __NR_readlinkat 296
 #define __NR_fchmodat 297
 #define __NR_faccessat 298
+#define __NR_get_robust_list 299
+#define __NR_set_robust_list 300
 
-#define __NR_syscalls 299
+#define __NR_syscalls 301
 
 #ifdef __KERNEL__
 #define __NR__exit __NR_exit

@@ -4,7 +4,146 @@
 #include <linux/config.h>
 
 #ifdef CONFIG_PCI
-#include <asm-generic/dma-mapping.h>
+
+/* we implement the API below in terms of the existing PCI one,
+ * so include it */
+#include <linux/pci.h>
+/* need struct page definitions */
+#include <linux/mm.h>
+
+static inline int
+dma_supported(struct device *dev, u64 mask)
+{
+BUG_ON(dev->bus != &pci_bus_type);
+
+return pci_dma_supported(to_pci_dev(dev), mask);
+}
+
+static inline int
+dma_set_mask(struct device *dev, u64 dma_mask)
+{
+BUG_ON(dev->bus != &pci_bus_type);
+
+return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
+}
+
+static inline void *
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+gfp_t flag)
+{
+BUG_ON(dev->bus != &pci_bus_type);
+
+return pci_iommu_ops->alloc_consistent(to_pci_dev(dev), size, dma_handle, flag);
+}
+
+static inline void
+dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+dma_addr_t dma_handle)
+{
+BUG_ON(dev->bus != &pci_bus_type);
+
+pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
+}
+
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+enum dma_data_direction direction)
+{
+BUG_ON(dev->bus != &pci_bus_type);
+
+return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
+}
+
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+enum dma_data_direction direction)
+{
+BUG_ON(dev->bus != &pci_bus_type);
+
+pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
+}
+
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page,
+unsigned long offset, size_t size,
+enum dma_data_direction direction)
+{
+BUG_ON(dev->bus != &pci_bus_type);
+
+return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+enum dma_data_direction direction)
+{
+BUG_ON(dev->bus != &pci_bus_type);
+
+pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
+}
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+enum dma_data_direction direction)
+{
+BUG_ON(dev->bus != &pci_bus_type);
+
+return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
+}
+
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+enum dma_data_direction direction)
+{
+BUG_ON(dev->bus != &pci_bus_type);
+
+pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
+}
+
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+enum dma_data_direction direction)
+{
+BUG_ON(dev->bus != &pci_bus_type);
+
+pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
+size, (int)direction);
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
+enum dma_data_direction direction)
+{
+BUG_ON(dev->bus != &pci_bus_type);
+
+pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
+size, (int)direction);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+enum dma_data_direction direction)
+{
+BUG_ON(dev->bus != &pci_bus_type);
+
+pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+enum dma_data_direction direction)
+{
+BUG_ON(dev->bus != &pci_bus_type);
+
+pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction);
+}
+
+static inline int
+dma_mapping_error(dma_addr_t dma_addr)
+{
+return pci_dma_mapping_error(dma_addr);
+}
+
 #else
 
 struct device;

@@ -42,7 +42,7 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
 struct pci_dev;
 
 struct pci_iommu_ops {
-void *(*alloc_consistent)(struct pci_dev *, size_t, dma_addr_t *);
+void *(*alloc_consistent)(struct pci_dev *, size_t, dma_addr_t *, gfp_t);
 void (*free_consistent)(struct pci_dev *, size_t, void *, dma_addr_t);
 dma_addr_t (*map_single)(struct pci_dev *, void *, size_t, int);
 void (*unmap_single)(struct pci_dev *, dma_addr_t, size_t, int);

@@ -59,7 +59,7 @@ extern struct pci_iommu_ops *pci_iommu_ops;
 */
 static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
 {
-return pci_iommu_ops->alloc_consistent(hwdev, size, dma_handle);
+return pci_iommu_ops->alloc_consistent(hwdev, size, dma_handle, GFP_ATOMIC);
 }
 
 /* Free and unmap a consistent DMA buffer.

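The two sparc64 hunks above thread a gfp_t argument through the alloc_consistent hook while keeping the old pci_alloc_consistent() entry point unchanged by having it pass GFP_ATOMIC. A minimal sketch of that compatibility-wrapper shape, with made-up names rather than the kernel API:

```c
#include <stdio.h>
#include <stdlib.h>

/* Pretend allocation flags, standing in for gfp_t values. */
enum alloc_flags { FLAG_ATOMIC = 1, FLAG_SLEEPING = 2 };

/* New-style backend: callers may now choose the allocation behaviour. */
static void *backend_alloc(size_t size, enum alloc_flags flags)
{
	printf("allocating %zu bytes with flags %d\n", size, flags);
	return malloc(size);
}

/* Old entry point keeps its signature and defaults to the strictest flag,
 * mirroring pci_alloc_consistent() forwarding GFP_ATOMIC in the diff. */
static void *legacy_alloc(size_t size)
{
	return backend_alloc(size, FLAG_ATOMIC);
}

int main(void)
{
	void *a = legacy_alloc(64);                 /* existing callers unchanged */
	void *b = backend_alloc(64, FLAG_SLEEPING); /* new callers can relax */
	free(a);
	free(b);
	return 0;
}
```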
@@ -213,6 +213,10 @@ extern int dir_notify_enable;
 #define FIBMAP _IO(0x00,1) /* bmap access */
 #define FIGETBSZ _IO(0x00,2) /* get the block size used for bmap */
 
+#define SYNC_FILE_RANGE_WAIT_BEFORE 1
+#define SYNC_FILE_RANGE_WRITE 2
+#define SYNC_FILE_RANGE_WAIT_AFTER 4
+
 #ifdef __KERNEL__
 
 #include <linux/linkage.h>

@@ -758,9 +762,6 @@ extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
 extern int fcntl_getlease(struct file *filp);
 
 /* fs/sync.c */
-#define SYNC_FILE_RANGE_WAIT_BEFORE 1
-#define SYNC_FILE_RANGE_WRITE 2
-#define SYNC_FILE_RANGE_WAIT_AFTER 4
 extern int do_sync_file_range(struct file *file, loff_t offset, loff_t endbyte,
 unsigned int flags);
 

@@ -52,6 +52,7 @@ struct utimbuf;
 struct mq_attr;
 struct compat_stat;
 struct compat_timeval;
+struct robust_list_head;
 
 #include <linux/config.h>
 #include <linux/types.h>

@@ -581,5 +582,10 @@ asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags);
 
 asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
 unsigned int flags);
+asmlinkage long sys_get_robust_list(int pid,
+struct robust_list_head __user **head_ptr,
+size_t __user *len_ptr);
+asmlinkage long sys_set_robust_list(struct robust_list_head __user *head,
+size_t len);
 
 #endif

@@ -112,7 +112,7 @@ struct lsap_cb {
 
 struct timer_list watchdog_timer;
 
-IRLMP_STATE lsap_state; /* Connection state */
+LSAP_STATE lsap_state; /* Connection state */
 notify_t notify; /* Indication/Confirm entry points */
 struct qos_info qos; /* QoS for this connection */
 

@@ -55,7 +55,7 @@ static int __init br_init(void)
 
 static void __exit br_deinit(void)
 {
-llc_sap_close(br_stp_sap);
+rcu_assign_pointer(br_stp_sap->rcv_func, NULL);
 
 #ifdef CONFIG_BRIDGE_NETFILTER
 br_netfilter_fini();

@@ -67,6 +67,7 @@ static void __exit br_deinit(void)
 
 synchronize_net();
 
+llc_sap_put(br_stp_sap);
 br_fdb_get_hook = NULL;
 br_fdb_put_hook = NULL;
 

@@ -210,7 +210,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
 skb->h.icmph->code != ICMP_FRAG_NEEDED)
 return;
 
-spi = ntohl(ntohs(ipch->cpi));
+spi = htonl(ntohs(ipch->cpi));
 x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr,
 spi, IPPROTO_COMP, AF_INET);
 if (!x)

@@ -528,14 +528,15 @@ int decode_seq(bitstr_t * bs, field_t * f, char *base, int level)
 
 /* Decode */
 if ((err = (Decoders[son->type]) (bs, son, base,
-level + 1)) >
-H323_ERROR_STOP)
+level + 1)) <
+H323_ERROR_NONE)
 return err;
 
 bs->cur = beg + len;
 bs->bit = 0;
 } else if ((err = (Decoders[son->type]) (bs, son, base,
-level + 1)))
+level + 1)) <
+H323_ERROR_NONE)
 return err;
 }
 

@@ -554,7 +555,7 @@ int decode_seq(bitstr_t * bs, field_t * f, char *base, int level)
 
 /* Decode the extension components */
 for (opt = 0; opt < bmp2_len; opt++, i++, son++) {
-if (son->attr & STOP) {
+if (i < f->ub && son->attr & STOP) {
 PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ",
 son->name);
 return H323_ERROR_STOP;

@@ -584,8 +585,8 @@ int decode_seq(bitstr_t * bs, field_t * f, char *base, int level)
 beg = bs->cur;
 
 if ((err = (Decoders[son->type]) (bs, son, base,
-level + 1)) >
-H323_ERROR_STOP)
+level + 1)) <
+H323_ERROR_NONE)
 return err;
 
 bs->cur = beg + len;

@@ -660,18 +661,20 @@ int decode_seqof(bitstr_t * bs, field_t * f, char *base, int level)
 i <
 effective_count ?
 base : NULL,
-level + 1)) >
-H323_ERROR_STOP)
+level + 1)) <
+H323_ERROR_NONE)
 return err;
 
 bs->cur = beg + len;
 bs->bit = 0;
 } else
-if ((err = (Decoders[son->type]) (bs, son,
-i < effective_count ?
-base : NULL,
-level + 1)))
-return err;
+if ((err = (Decoders[son->type]) (bs, son,
+i <
+effective_count ?
+base : NULL,
+level + 1)) <
+H323_ERROR_NONE)
+return err;
 
 if (base)
 base += son->offset;

@@ -735,13 +738,14 @@ int decode_choice(bitstr_t * bs, field_t * f, char *base, int level)
 }
 beg = bs->cur;
 
-if ((err = (Decoders[son->type]) (bs, son, base, level + 1)) >
-H323_ERROR_STOP)
+if ((err = (Decoders[son->type]) (bs, son, base, level + 1)) <
+H323_ERROR_NONE)
 return err;
 
 bs->cur = beg + len;
 bs->bit = 0;
-} else if ((err = (Decoders[son->type]) (bs, son, base, level + 1)))
+} else if ((err = (Decoders[son->type]) (bs, son, base, level + 1)) <
+H323_ERROR_NONE)
 return err;
 
 return H323_ERROR_NONE;

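Every hunk in the H.323 ASN.1 section above replaces `err > H323_ERROR_STOP` (or a bare truth test) with `err < H323_ERROR_NONE`, so that only negative codes abort decoding while positive in-band markers such as STOP keep flowing. A small hedged sketch of that convention, with illustrative values rather than the netfilter enum:

```c
#include <stdio.h>

/* Illustrative error convention: negative = hard failure,
 * zero = success, positive = in-band conditions like "stop here". */
enum { ERR_BOUND = -1, ERR_NONE = 0, ERR_STOP = 1 };

static int decode_child(int simulated)
{
	return simulated;
}

static int decode_parent(int simulated)
{
	int err = decode_child(simulated);

	if (err < ERR_NONE)     /* the corrected comparison from the diff */
		return err;     /* propagate real failures only */
	return ERR_NONE;        /* STOP and success both continue here */
}

int main(void)
{
	printf("failure path -> %d\n", decode_parent(ERR_BOUND));
	printf("stop path    -> %d\n", decode_parent(ERR_STOP));
	return 0;
}
```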
@@ -768,6 +768,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
 len *= sizeof(unsigned long);
 *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
 if (*obj == NULL) {
+kfree(lp);
 kfree(id);
 if (net_ratelimit())
 printk("OOM in bsalg (%d)\n", __LINE__);

@@ -1003,12 +1004,12 @@ static unsigned char snmp_trap_decode(struct asn1_ctx *ctx,
 
 return 1;
 
-err_addr_free:
-kfree((unsigned long *)trap->ip_address);
-
 err_id_free:
 kfree(trap->id);
 
+err_addr_free:
+kfree((unsigned long *)trap->ip_address);
+
 return 0;
 }
 

@@ -1126,11 +1127,10 @@ static int snmp_parse_mangle(unsigned char *msg,
 struct snmp_v1_trap trap;
 unsigned char ret = snmp_trap_decode(&ctx, &trap, map, check);
 
-/* Discard trap allocations regardless */
-kfree(trap.id);
-kfree((unsigned long *)trap.ip_address);
-
-if (!ret)
+if (ret) {
+kfree(trap.id);
+kfree((unsigned long *)trap.ip_address);
+} else
 return ret;
 
 } else {

@@ -221,7 +221,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl)
 if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
 u16 *ipcomp_hdr = (u16 *)xprth;
 
-fl->fl_ipsec_spi = ntohl(ntohs(ipcomp_hdr[1]));
+fl->fl_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
 }
 break;
 default:

@@ -208,7 +208,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 if (type != ICMPV6_DEST_UNREACH && type != ICMPV6_PKT_TOOBIG)
 return;
 
-spi = ntohl(ntohs(ipcomph->cpi));
+spi = htonl(ntohs(ipcomph->cpi));
 x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, spi, IPPROTO_COMP, AF_INET6);
 if (!x)
 return;

@@ -544,7 +544,8 @@ static void iriap_getvaluebyclass_response(struct iriap_cb *self,
 {
 struct sk_buff *tx_skb;
 int n;
-__u32 tmp_be32, tmp_be16;
+__u32 tmp_be32;
+__be16 tmp_be16;
 __u8 *fp;
 
 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);

@@ -159,6 +159,7 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
 detail->update(tmp, new);
 tmp->next = *head;
 *head = tmp;
+detail->entries++;
 cache_get(tmp);
 is_new = cache_fresh_locked(tmp, new->expiry_time);
 cache_fresh_locked(old, 0);

@@ -62,7 +62,7 @@ int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, u32 *spi, u32 *seq)
 case IPPROTO_COMP:
 if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
 return -EINVAL;
-*spi = ntohl(ntohs(*(u16*)(skb->h.raw + 2)));
+*spi = htonl(ntohs(*(u16*)(skb->h.raw + 2)));
 *seq = 0;
 return 0;
 default: