Merge branch 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6: (54 commits)
  [S390] tape: Use pr_xxx instead of dev_xxx in shared driver code
  [S390] Wire up page fault events for software perf counters.
  [S390] Remove smp_cpu_not_running.
  [S390] Get rid of cpuid.h header file.
  [S390] Limit cpu detection to 256 physical cpus.
  [S390] tape: Fix device online messages
  [S390] Enable guest page hinting by default.
  [S390] use generic scatterlist.h
  [S390] s390dbf: Add description for usage of "%s" in sprintf events
  [S390] Initialize __LC_THREAD_INFO early.
  [S390] fix recursive locking on page_table_lock
  [S390] kvm: use console_initcall() to initialize s390 virtio console
  [S390] tape: reversed order of labels
  [S390] hypfs: Use "%u" instead of "%d" for unsigned ints in snprintf
  [S390] kernel: Print an error message if kernel NSS cannot be defined
  [S390] zcrypt: Free ap_device if dev_set_name fails.
  [S390] zcrypt: Use spin_lock_bh in suspend callback
  [S390] xpram: Remove checksum validation for suspend/resume
  [S390] vmur: Invalid allocation sequence for vmur class
  [S390] hypfs: remove useless variable qname
  ...
Linus Torvalds 2009-09-11 09:16:39 -07:00
commit 89af571ca6
91 changed files with 1439 additions and 1326 deletions


@ -495,6 +495,13 @@ and for each vararg a long value. So e.g. for a debug entry with a format
string plus two varargs one would need to allocate a (3 * sizeof(long))
byte data area in the debug_register() function.
IMPORTANT: Using "%s" in sprintf event functions is dangerous. You may only
use "%s" in the sprintf event functions if the memory for the passed string is
available as long as the debug feature exists. The reason is that, for
performance, only a pointer to the string is stored in the debug feature. If
you log a string that is freed afterwards, you will get an OOPS when
inspecting the debug feature, because the debug feature will then access the
already freed memory.
NOTE: When using the sprintf view, do NOT use event/exception functions other
than the sprintf event and exception functions.
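A minimal sketch of the rule above (hypothetical driver and names; it assumes
a debug area registered as described earlier and the sprintf view): a string
literal stays valid for the lifetime of the debug feature and may be logged
with "%s", whereas a buffer that is freed right after the call must not be.

#include <linux/init.h>
#include <linux/errno.h>
#include <asm/debug.h>

/* Sketch: 3 longs per entry = format string pointer plus two varargs. */
static debug_info_t *my_dbf;

static int __init my_dbf_init(void)
{
	my_dbf = debug_register("mydrv", 4, 1, 3 * sizeof(long));
	if (!my_dbf)
		return -ENOMEM;
	debug_register_view(my_dbf, &debug_sprintf_view);

	/* OK: the string literal outlives the debug feature */
	debug_sprintf_event(my_dbf, 1, "state %s count %d", "running", 5);

	/* NOT OK: only the pointer would be stored, and the memory is freed
	 * before the sprintf view is ever read:
	 *   char *buf = kasprintf(GFP_KERNEL, "dev%d", 0);
	 *   debug_sprintf_event(my_dbf, 1, "attached %s", buf);
	 *   kfree(buf);
	 */
	return 0;
}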


@ -19,6 +19,7 @@ Currently, these files might (depending on your configuration)
show up in /proc/sys/kernel:
- acpi_video_flags
- acct
- callhome [ S390 only ]
- auto_msgmni
- core_pattern
- core_uses_pid
@ -91,6 +92,21 @@ valid for 30 seconds.
==============================================================
callhome:
Controls the kernel's callhome behavior in case of a kernel panic.
The s390 hardware allows an operating system to send a notification
to a service organization (callhome) in case of an operating system panic.
When the value in this file is 0 (the default), nothing happens in
case of a kernel panic. If this value is set to "1", the complete
kernel oops message is sent to the IBM customer service organization,
provided the mainframe the Linux operating system is running on has a
service contract with IBM.
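As a hedged illustration (userspace sketch, not part of this patch), the
setting can be toggled by writing to the proc file described above:

#include <stdio.h>

int main(void)
{
	/* 0 = do nothing on panic (default), 1 = send the oops message */
	FILE *f = fopen("/proc/sys/kernel/callhome", "w");

	if (!f) {
		perror("/proc/sys/kernel/callhome");
		return 1;
	}
	fputs("1\n", f);
	return fclose(f) ? 1 : 0;
}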
==============================================================
core_pattern:
core_pattern is used to specify a core dumpfile pattern name.


@ -95,7 +95,6 @@ config S390
select HAVE_ARCH_TRACEHOOK
select INIT_ALL_POSSIBLE
select HAVE_PERF_COUNTERS
select GENERIC_ATOMIC64 if !64BIT
config SCHED_OMIT_FRAME_POINTER
bool
@ -481,13 +480,6 @@ config CMM_IUCV
Select this option to enable the special message interface to
the cooperative memory management.
config PAGE_STATES
bool "Unused page notification"
help
This enables the notification of unused pages to the
hypervisor. The ESSA instruction is used to do the states
changes between a page that has content and the unused state.
config APPLDATA_BASE
bool "Linux - VM Monitor Stream, base infrastructure"
depends on PROC_FS


@ -88,8 +88,7 @@ LDFLAGS_vmlinux := -e start
head-y := arch/s390/kernel/head.o arch/s390/kernel/init_task.o
core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \
arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ \
arch/s390/power/
arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/
libs-y += arch/s390/lib/
drivers-y += drivers/s390/


@ -355,11 +355,7 @@ static struct dentry *hypfs_create_file(struct super_block *sb,
{
struct dentry *dentry;
struct inode *inode;
struct qstr qname;
qname.name = name;
qname.len = strlen(name);
qname.hash = full_name_hash(name, qname.len);
mutex_lock(&parent->d_inode->i_mutex);
dentry = lookup_one_len(name, parent, strlen(name));
if (IS_ERR(dentry)) {
@ -426,7 +422,7 @@ struct dentry *hypfs_create_u64(struct super_block *sb, struct dentry *dir,
char tmp[TMP_SIZE];
struct dentry *dentry;
snprintf(tmp, TMP_SIZE, "%lld\n", (unsigned long long int)value);
snprintf(tmp, TMP_SIZE, "%llu\n", (unsigned long long int)value);
buffer = kstrdup(tmp, GFP_KERNEL);
if (!buffer)
return ERR_PTR(-ENOMEM);


@ -1,33 +1,23 @@
#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__
/*
* Copyright 1999,2009 IBM Corp.
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Denis Joseph Barrow,
* Arnd Bergmann <arndb@de.ibm.com>,
*
* Atomic operations that C can't guarantee us.
* Useful for resource counting etc.
* s390 uses 'Compare And Swap' for atomicity in SMP environment.
*
*/
#include <linux/compiler.h>
#include <linux/types.h>
/*
* include/asm-s390/atomic.h
*
* S390 version
* Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
* Denis Joseph Barrow,
* Arnd Bergmann (arndb@de.ibm.com)
*
* Derived from "include/asm-i386/bitops.h"
* Copyright (C) 1992, Linus Torvalds
*
*/
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
* S390 uses 'Compare And Swap' for atomicity in SMP environment
*/
#define ATOMIC_INIT(i) { (i) }
#ifdef __KERNEL__
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
#define __CS_LOOP(ptr, op_val, op_string) ({ \
@ -77,7 +67,7 @@ static inline void atomic_set(atomic_t *v, int i)
barrier();
}
static __inline__ int atomic_add_return(int i, atomic_t * v)
static inline int atomic_add_return(int i, atomic_t *v)
{
return __CS_LOOP(v, i, "ar");
}
@ -87,7 +77,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
#define atomic_inc_return(_v) atomic_add_return(1, _v)
#define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0)
static __inline__ int atomic_sub_return(int i, atomic_t * v)
static inline int atomic_sub_return(int i, atomic_t *v)
{
return __CS_LOOP(v, i, "sr");
}
@ -97,19 +87,19 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
#define atomic_dec_return(_v) atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
__CS_LOOP(v, ~mask, "nr");
__CS_LOOP(v, ~mask, "nr");
}
static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
{
__CS_LOOP(v, mask, "or");
__CS_LOOP(v, mask, "or");
}
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
asm volatile(
@ -127,7 +117,7 @@ static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
return old;
}
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
@ -146,9 +136,10 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
#undef __CS_LOOP
#ifdef __s390x__
#define ATOMIC64_INIT(i) { (i) }
#ifdef CONFIG_64BIT
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
#define __CSG_LOOP(ptr, op_val, op_string) ({ \
@ -162,7 +153,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
: "=&d" (old_val), "=&d" (new_val), \
"=Q" (((atomic_t *)(ptr))->counter) \
: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
: "cc", "memory" ); \
: "cc", "memory"); \
new_val; \
})
@ -180,7 +171,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
"=m" (((atomic_t *)(ptr))->counter) \
: "a" (ptr), "d" (op_val), \
"m" (((atomic_t *)(ptr))->counter) \
: "cc", "memory" ); \
: "cc", "memory"); \
new_val; \
})
@ -198,39 +189,29 @@ static inline void atomic64_set(atomic64_t *v, long long i)
barrier();
}
static __inline__ long long atomic64_add_return(long long i, atomic64_t * v)
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
return __CSG_LOOP(v, i, "agr");
}
#define atomic64_add(_i, _v) atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v) atomic64_add_return(1, _v)
#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v)
static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
return __CSG_LOOP(v, i, "sgr");
}
#define atomic64_sub(_i, _v) atomic64_sub_return(_i, _v)
#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v) atomic64_sub_return(1, _v)
#define atomic64_dec_return(_v) atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
__CSG_LOOP(v, ~mask, "ngr");
__CSG_LOOP(v, ~mask, "ngr");
}
static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
__CSG_LOOP(v, mask, "ogr");
__CSG_LOOP(v, mask, "ogr");
}
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
static inline long long atomic64_cmpxchg(atomic64_t *v,
long long old, long long new)
{
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
@ -249,8 +230,112 @@ static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
return old;
}
static __inline__ int atomic64_add_unless(atomic64_t *v,
long long a, long long u)
#undef __CSG_LOOP
#else /* CONFIG_64BIT */
typedef struct {
long long counter;
} atomic64_t;
static inline long long atomic64_read(const atomic64_t *v)
{
register_pair rp;
asm volatile(
" lm %0,%N0,0(%1)"
: "=&d" (rp)
: "a" (&v->counter), "m" (v->counter)
);
return rp.pair;
}
static inline void atomic64_set(atomic64_t *v, long long i)
{
register_pair rp = {.pair = i};
asm volatile(
" stm %1,%N1,0(%2)"
: "=m" (v->counter)
: "d" (rp), "a" (&v->counter)
);
}
static inline long long atomic64_xchg(atomic64_t *v, long long new)
{
register_pair rp_new = {.pair = new};
register_pair rp_old;
asm volatile(
" lm %0,%N0,0(%2)\n"
"0: cds %0,%3,0(%2)\n"
" jl 0b\n"
: "=&d" (rp_old), "+m" (v->counter)
: "a" (&v->counter), "d" (rp_new)
: "cc");
return rp_old.pair;
}
static inline long long atomic64_cmpxchg(atomic64_t *v,
long long old, long long new)
{
register_pair rp_old = {.pair = old};
register_pair rp_new = {.pair = new};
asm volatile(
" cds %0,%3,0(%2)"
: "+&d" (rp_old), "+m" (v->counter)
: "a" (&v->counter), "d" (rp_new)
: "cc");
return rp_old.pair;
}
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
long long old, new;
do {
old = atomic64_read(v);
new = old + i;
} while (atomic64_cmpxchg(v, old, new) != old);
return new;
}
static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
long long old, new;
do {
old = atomic64_read(v);
new = old - i;
} while (atomic64_cmpxchg(v, old, new) != old);
return new;
}
static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
{
long long old, new;
do {
old = atomic64_read(v);
new = old | mask;
} while (atomic64_cmpxchg(v, old, new) != old);
}
static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
{
long long old, new;
do {
old = atomic64_read(v);
new = old & mask;
} while (atomic64_cmpxchg(v, old, new) != old);
}
#endif /* CONFIG_64BIT */
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
long long c, old;
c = atomic64_read(v);
@ -265,15 +350,17 @@ static __inline__ int atomic64_add_unless(atomic64_t *v,
return c != u;
}
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
#undef __CSG_LOOP
#else /* __s390x__ */
#include <asm-generic/atomic64.h>
#endif /* __s390x__ */
#define atomic64_add(_i, _v) atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v) atomic64_add_return(1, _v)
#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
#define atomic64_sub(_i, _v) atomic64_sub_return(_i, _v)
#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v) atomic64_sub_return(1, _v)
#define atomic64_dec_return(_v) atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
@ -281,5 +368,5 @@ static __inline__ int atomic64_add_unless(atomic64_t *v,
#define smp_mb__after_atomic_inc() smp_mb()
#include <asm-generic/atomic-long.h>
#endif /* __KERNEL__ */
#endif /* __ARCH_S390_ATOMIC__ */


@ -78,28 +78,11 @@ csum_partial_copy_nocheck (const void *src, void *dst, int len, __wsum sum)
*/
static inline __sum16 csum_fold(__wsum sum)
{
#ifndef __s390x__
register_pair rp;
u32 csum = (__force u32) sum;
asm volatile(
" slr %N1,%N1\n" /* %0 = H L */
" lr %1,%0\n" /* %0 = H L, %1 = H L 0 0 */
" srdl %1,16\n" /* %0 = H L, %1 = 0 H L 0 */
" alr %1,%N1\n" /* %0 = H L, %1 = L H L 0 */
" alr %0,%1\n" /* %0 = H+L+C L+H */
" srl %0,16\n" /* %0 = H+L+C */
: "+&d" (sum), "=d" (rp) : : "cc");
#else /* __s390x__ */
asm volatile(
" sr 3,3\n" /* %0 = H*65536 + L */
" lr 2,%0\n" /* %0 = H L, 2/3 = H L / 0 0 */
" srdl 2,16\n" /* %0 = H L, 2/3 = 0 H / L 0 */
" alr 2,3\n" /* %0 = H L, 2/3 = L H / L 0 */
" alr %0,2\n" /* %0 = H+L+C L+H */
" srl %0,16\n" /* %0 = H+L+C */
: "+&d" (sum) : : "cc", "2", "3");
#endif /* __s390x__ */
return (__force __sum16) ~sum;
csum += (csum >> 16) + (csum << 16);
csum >>= 16;
return (__force __sum16) ~csum;
}
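The three C statements above replace the former per-architecture assembler:
they fold the 32-bit sum into 16 bits with the end-around carry of the one's
complement addition. A standalone sketch (hypothetical userspace test, not
part of the patch) showing the arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Same trick as the new csum_fold(): after the addition, the upper
 * halfword of csum holds high16 + low16 including the end-around carry. */
static uint16_t fold(uint32_t csum)
{
	csum += (csum >> 16) + (csum << 16);
	csum >>= 16;
	return (uint16_t)~csum;
}

int main(void)
{
	/* 0xffff + 0x0001 = 0x10000 -> end-around carry -> 0x0001, ~ -> 0xfffe */
	printf("%#x\n", fold(0xffff0001u));
	return 0;
}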
/*


@ -125,4 +125,32 @@ struct chsc_cpd_info {
#define CHSC_INFO_CPD _IOWR(CHSC_IOCTL_MAGIC, 0x87, struct chsc_cpd_info)
#define CHSC_INFO_DCAL _IOWR(CHSC_IOCTL_MAGIC, 0x88, struct chsc_dcal)
#ifdef __KERNEL__
struct css_general_char {
u64 : 12;
u32 dynio : 1; /* bit 12 */
u32 : 28;
u32 aif : 1; /* bit 41 */
u32 : 3;
u32 mcss : 1; /* bit 45 */
u32 fcs : 1; /* bit 46 */
u32 : 1;
u32 ext_mb : 1; /* bit 48 */
u32 : 7;
u32 aif_tdd : 1; /* bit 56 */
u32 : 1;
u32 qebsm : 1; /* bit 58 */
u32 : 8;
u32 aif_osa : 1; /* bit 67 */
u32 : 14;
u32 cib : 1; /* bit 82 */
u32 : 5;
u32 fcx : 1; /* bit 88 */
u32 : 7;
}__attribute__((packed));
extern struct css_general_char css_general_characteristics;
#endif /* __KERNEL__ */
#endif


@ -15,228 +15,7 @@
#define LPM_ANYPATH 0xff
#define __MAX_CSSID 0
/**
* struct cmd_scsw - command-mode subchannel status word
* @key: subchannel key
* @sctl: suspend control
* @eswf: esw format
* @cc: deferred condition code
* @fmt: format
* @pfch: prefetch
* @isic: initial-status interruption control
* @alcc: address-limit checking control
* @ssi: suppress-suspended interruption
* @zcc: zero condition code
* @ectl: extended control
* @pno: path not operational
* @res: reserved
* @fctl: function control
* @actl: activity control
* @stctl: status control
* @cpa: channel program address
* @dstat: device status
* @cstat: subchannel status
* @count: residual count
*/
struct cmd_scsw {
__u32 key : 4;
__u32 sctl : 1;
__u32 eswf : 1;
__u32 cc : 2;
__u32 fmt : 1;
__u32 pfch : 1;
__u32 isic : 1;
__u32 alcc : 1;
__u32 ssi : 1;
__u32 zcc : 1;
__u32 ectl : 1;
__u32 pno : 1;
__u32 res : 1;
__u32 fctl : 3;
__u32 actl : 7;
__u32 stctl : 5;
__u32 cpa;
__u32 dstat : 8;
__u32 cstat : 8;
__u32 count : 16;
} __attribute__ ((packed));
/**
* struct tm_scsw - transport-mode subchannel status word
* @key: subchannel key
* @eswf: esw format
* @cc: deferred condition code
* @fmt: format
* @x: IRB-format control
* @q: interrogate-complete
* @ectl: extended control
* @pno: path not operational
* @fctl: function control
* @actl: activity control
* @stctl: status control
* @tcw: TCW address
* @dstat: device status
* @cstat: subchannel status
* @fcxs: FCX status
* @schxs: subchannel-extended status
*/
struct tm_scsw {
u32 key:4;
u32 :1;
u32 eswf:1;
u32 cc:2;
u32 fmt:3;
u32 x:1;
u32 q:1;
u32 :1;
u32 ectl:1;
u32 pno:1;
u32 :1;
u32 fctl:3;
u32 actl:7;
u32 stctl:5;
u32 tcw;
u32 dstat:8;
u32 cstat:8;
u32 fcxs:8;
u32 schxs:8;
} __attribute__ ((packed));
/**
* union scsw - subchannel status word
* @cmd: command-mode SCSW
* @tm: transport-mode SCSW
*/
union scsw {
struct cmd_scsw cmd;
struct tm_scsw tm;
} __attribute__ ((packed));
int scsw_is_tm(union scsw *scsw);
u32 scsw_key(union scsw *scsw);
u32 scsw_eswf(union scsw *scsw);
u32 scsw_cc(union scsw *scsw);
u32 scsw_ectl(union scsw *scsw);
u32 scsw_pno(union scsw *scsw);
u32 scsw_fctl(union scsw *scsw);
u32 scsw_actl(union scsw *scsw);
u32 scsw_stctl(union scsw *scsw);
u32 scsw_dstat(union scsw *scsw);
u32 scsw_cstat(union scsw *scsw);
int scsw_is_solicited(union scsw *scsw);
int scsw_is_valid_key(union scsw *scsw);
int scsw_is_valid_eswf(union scsw *scsw);
int scsw_is_valid_cc(union scsw *scsw);
int scsw_is_valid_ectl(union scsw *scsw);
int scsw_is_valid_pno(union scsw *scsw);
int scsw_is_valid_fctl(union scsw *scsw);
int scsw_is_valid_actl(union scsw *scsw);
int scsw_is_valid_stctl(union scsw *scsw);
int scsw_is_valid_dstat(union scsw *scsw);
int scsw_is_valid_cstat(union scsw *scsw);
int scsw_cmd_is_valid_key(union scsw *scsw);
int scsw_cmd_is_valid_sctl(union scsw *scsw);
int scsw_cmd_is_valid_eswf(union scsw *scsw);
int scsw_cmd_is_valid_cc(union scsw *scsw);
int scsw_cmd_is_valid_fmt(union scsw *scsw);
int scsw_cmd_is_valid_pfch(union scsw *scsw);
int scsw_cmd_is_valid_isic(union scsw *scsw);
int scsw_cmd_is_valid_alcc(union scsw *scsw);
int scsw_cmd_is_valid_ssi(union scsw *scsw);
int scsw_cmd_is_valid_zcc(union scsw *scsw);
int scsw_cmd_is_valid_ectl(union scsw *scsw);
int scsw_cmd_is_valid_pno(union scsw *scsw);
int scsw_cmd_is_valid_fctl(union scsw *scsw);
int scsw_cmd_is_valid_actl(union scsw *scsw);
int scsw_cmd_is_valid_stctl(union scsw *scsw);
int scsw_cmd_is_valid_dstat(union scsw *scsw);
int scsw_cmd_is_valid_cstat(union scsw *scsw);
int scsw_cmd_is_solicited(union scsw *scsw);
int scsw_tm_is_valid_key(union scsw *scsw);
int scsw_tm_is_valid_eswf(union scsw *scsw);
int scsw_tm_is_valid_cc(union scsw *scsw);
int scsw_tm_is_valid_fmt(union scsw *scsw);
int scsw_tm_is_valid_x(union scsw *scsw);
int scsw_tm_is_valid_q(union scsw *scsw);
int scsw_tm_is_valid_ectl(union scsw *scsw);
int scsw_tm_is_valid_pno(union scsw *scsw);
int scsw_tm_is_valid_fctl(union scsw *scsw);
int scsw_tm_is_valid_actl(union scsw *scsw);
int scsw_tm_is_valid_stctl(union scsw *scsw);
int scsw_tm_is_valid_dstat(union scsw *scsw);
int scsw_tm_is_valid_cstat(union scsw *scsw);
int scsw_tm_is_valid_fcxs(union scsw *scsw);
int scsw_tm_is_valid_schxs(union scsw *scsw);
int scsw_tm_is_solicited(union scsw *scsw);
#define SCSW_FCTL_CLEAR_FUNC 0x1
#define SCSW_FCTL_HALT_FUNC 0x2
#define SCSW_FCTL_START_FUNC 0x4
#define SCSW_ACTL_SUSPENDED 0x1
#define SCSW_ACTL_DEVACT 0x2
#define SCSW_ACTL_SCHACT 0x4
#define SCSW_ACTL_CLEAR_PEND 0x8
#define SCSW_ACTL_HALT_PEND 0x10
#define SCSW_ACTL_START_PEND 0x20
#define SCSW_ACTL_RESUME_PEND 0x40
#define SCSW_STCTL_STATUS_PEND 0x1
#define SCSW_STCTL_SEC_STATUS 0x2
#define SCSW_STCTL_PRIM_STATUS 0x4
#define SCSW_STCTL_INTER_STATUS 0x8
#define SCSW_STCTL_ALERT_STATUS 0x10
#define DEV_STAT_ATTENTION 0x80
#define DEV_STAT_STAT_MOD 0x40
#define DEV_STAT_CU_END 0x20
#define DEV_STAT_BUSY 0x10
#define DEV_STAT_CHN_END 0x08
#define DEV_STAT_DEV_END 0x04
#define DEV_STAT_UNIT_CHECK 0x02
#define DEV_STAT_UNIT_EXCEP 0x01
#define SCHN_STAT_PCI 0x80
#define SCHN_STAT_INCORR_LEN 0x40
#define SCHN_STAT_PROG_CHECK 0x20
#define SCHN_STAT_PROT_CHECK 0x10
#define SCHN_STAT_CHN_DATA_CHK 0x08
#define SCHN_STAT_CHN_CTRL_CHK 0x04
#define SCHN_STAT_INTF_CTRL_CHK 0x02
#define SCHN_STAT_CHAIN_CHECK 0x01
/*
* architectured values for first sense byte
*/
#define SNS0_CMD_REJECT 0x80
#define SNS_CMD_REJECT SNS0_CMD_REJEC
#define SNS0_INTERVENTION_REQ 0x40
#define SNS0_BUS_OUT_CHECK 0x20
#define SNS0_EQUIPMENT_CHECK 0x10
#define SNS0_DATA_CHECK 0x08
#define SNS0_OVERRUN 0x04
#define SNS0_INCOMPL_DOMAIN 0x01
/*
* architectured values for second sense byte
*/
#define SNS1_PERM_ERR 0x80
#define SNS1_INV_TRACK_FORMAT 0x40
#define SNS1_EOC 0x20
#define SNS1_MESSAGE_TO_OPER 0x10
#define SNS1_NO_REC_FOUND 0x08
#define SNS1_FILE_PROTECTED 0x04
#define SNS1_WRITE_INHIBITED 0x02
#define SNS1_INPRECISE_END 0x01
/*
* architectured values for third sense byte
*/
#define SNS2_REQ_INH_WRITE 0x80
#define SNS2_CORRECTABLE 0x40
#define SNS2_FIRST_LOG_ERR 0x20
#define SNS2_ENV_DATA_PRESENT 0x10
#define SNS2_INPRECISE_END 0x04
#include <asm/scsw.h>
/**
* struct ccw1 - channel command word


@ -0,0 +1,26 @@
/*
* Copyright IBM Corp. 2000,2009
* Author(s): Hartmut Penner <hp@de.ibm.com>,
* Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Christian Ehrhardt <ehrhardt@de.ibm.com>,
*/
#ifndef _ASM_S390_CPU_H
#define _ASM_S390_CPU_H
#define MAX_CPU_ADDRESS 255
#ifndef __ASSEMBLY__
#include <linux/types.h>
struct cpuid
{
unsigned int version : 8;
unsigned int ident : 24;
unsigned int machine : 16;
unsigned int unused : 16;
} __packed;
#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_CPU_H */


@ -1,25 +0,0 @@
/*
* Copyright IBM Corp. 2000,2009
* Author(s): Hartmut Penner <hp@de.ibm.com>,
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Christian Ehrhardt <ehrhardt@de.ibm.com>
*/
#ifndef _ASM_S390_CPUID_H_
#define _ASM_S390_CPUID_H_
/*
* CPU type and hardware bug flags. Kept separately for each CPU.
* Members of this structure are referenced in head.S, so think twice
* before touching them. [mj]
*/
typedef struct
{
unsigned int version : 8;
unsigned int ident : 24;
unsigned int machine : 16;
unsigned int unused : 16;
} __attribute__ ((packed)) cpuid_t;
#endif /* _ASM_S390_CPUID_H_ */


@ -167,6 +167,10 @@ debug_text_event(debug_info_t* id, int level, const char* txt)
return debug_event_common(id,level,txt,strlen(txt));
}
/*
* IMPORTANT: Use "%s" in sprintf format strings with care! Only pointers are
* stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details!
*/
extern debug_entry_t *
debug_sprintf_event(debug_info_t* id,int level,char *string,...)
__attribute__ ((format(printf, 3, 4)));
@ -206,7 +210,10 @@ debug_text_exception(debug_info_t* id, int level, const char* txt)
return debug_exception_common(id,level,txt,strlen(txt));
}
/*
* IMPORTANT: Use "%s" in sprintf format strings with care! Only pointers are
* stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details!
*/
extern debug_entry_t *
debug_sprintf_exception(debug_info_t* id,int level,char *string,...)
__attribute__ ((format(printf, 3, 4)));


@ -18,13 +18,6 @@
#include <linux/interrupt.h>
#include <asm/lowcore.h>
/* irq_cpustat_t is unused currently, but could be converted
* into a percpu variable instead of storing softirq_pending
* on the lowcore */
typedef struct {
unsigned int __softirq_pending;
} irq_cpustat_t;
#define local_softirq_pending() (S390_lowcore.softirq_pending)
#define __ARCH_IRQ_STAT


@ -57,6 +57,8 @@ struct ipl_block_fcp {
} __attribute__((packed));
#define DIAG308_VMPARM_SIZE 64
#define DIAG308_SCPDATA_SIZE (PAGE_SIZE - (sizeof(struct ipl_list_hdr) + \
offsetof(struct ipl_block_fcp, scp_data)))
struct ipl_block_ccw {
u8 load_parm[8];
@ -91,7 +93,8 @@ extern void do_halt(void);
extern void do_poff(void);
extern void ipl_save_parameters(void);
extern void ipl_update_parameters(void);
extern void get_ipl_vmparm(char *);
extern size_t append_ipl_vmparm(char *, size_t);
extern size_t append_ipl_scpdata(char *, size_t);
enum {
IPL_DEVNO_VALID = 1,


@ -17,7 +17,7 @@
#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <asm/debug.h>
#include <asm/cpuid.h>
#include <asm/cpu.h>
#define KVM_MAX_VCPUS 64
#define KVM_MEMORY_SLOTS 32
@ -217,8 +217,8 @@ struct kvm_vcpu_arch {
struct hrtimer ckc_timer;
struct tasklet_struct tasklet;
union {
cpuid_t cpu_id;
u64 stidp_data;
struct cpuid cpu_id;
u64 stidp_data;
};
};


@ -54,14 +54,4 @@ struct kvm_vqconfig {
* This is pagesize for historical reasons. */
#define KVM_S390_VIRTIO_RING_ALIGN 4096
#ifdef __KERNEL__
/* early virtio console setup */
#ifdef CONFIG_S390_GUEST
extern void s390_virtio_console_init(void);
#else
static inline void s390_virtio_console_init(void)
{
}
#endif /* CONFIG_VIRTIO_CONSOLE */
#endif /* __KERNEL__ */
#endif


@ -132,7 +132,7 @@
#ifndef __ASSEMBLY__
#include <asm/cpuid.h>
#include <asm/cpu.h>
#include <asm/ptrace.h>
#include <linux/types.h>
@ -275,7 +275,7 @@ struct _lowcore
__u32 user_exec_asce; /* 0x02ac */
/* SMP info area */
cpuid_t cpu_id; /* 0x02b0 */
struct cpuid cpu_id; /* 0x02b0 */
__u32 cpu_nr; /* 0x02b8 */
__u32 softirq_pending; /* 0x02bc */
__u32 percpu_offset; /* 0x02c0 */
@ -380,7 +380,7 @@ struct _lowcore
__u64 user_exec_asce; /* 0x0318 */
/* SMP info area */
cpuid_t cpu_id; /* 0x0320 */
struct cpuid cpu_id; /* 0x0320 */
__u32 cpu_nr; /* 0x0328 */
__u32 softirq_pending; /* 0x032c */
__u64 percpu_offset; /* 0x0330 */


@ -2,6 +2,7 @@
#define __MMU_H
typedef struct {
spinlock_t list_lock;
struct list_head crst_list;
struct list_head pgtable_list;
unsigned long asce_bits;


@ -125,8 +125,6 @@ page_get_storage_key(unsigned long addr)
return skey;
}
#ifdef CONFIG_PAGE_STATES
struct page;
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);
@ -134,8 +132,6 @@ void arch_alloc_page(struct page *page, int order);
#define HAVE_ARCH_FREE_PAGE
#define HAVE_ARCH_ALLOC_PAGE
#endif
#endif /* !__ASSEMBLY__ */
#define __PAGE_OFFSET 0x0UL


@ -140,6 +140,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
spin_lock_init(&mm->context.list_lock);
INIT_LIST_HEAD(&mm->context.crst_list);
INIT_LIST_HEAD(&mm->context.pgtable_list);
return (pgd_t *) crst_table_alloc(mm, s390_noexec);


@ -14,7 +14,7 @@
#define __ASM_S390_PROCESSOR_H
#include <linux/linkage.h>
#include <asm/cpuid.h>
#include <asm/cpu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
@ -26,7 +26,7 @@
*/
#define current_text_addr() ({ void *pc; asm("basr %0,0" : "=a" (pc)); pc; })
static inline void get_cpu_id(cpuid_t *ptr)
static inline void get_cpu_id(struct cpuid *ptr)
{
asm volatile("stidp 0(%1)" : "=m" (*ptr) : "a" (ptr));
}


@ -1,19 +1 @@
#ifndef _ASMS390_SCATTERLIST_H
#define _ASMS390_SCATTERLIST_H
struct scatterlist {
#ifdef CONFIG_DEBUG_SG
unsigned long sg_magic;
#endif
unsigned long page_link;
unsigned int offset;
unsigned int length;
};
#ifdef __s390x__
#define ISA_DMA_THRESHOLD (0xffffffffffffffffUL)
#else
#define ISA_DMA_THRESHOLD (0xffffffffUL)
#endif
#endif /* _ASMS390X_SCATTERLIST_H */
#include <asm-generic/scatterlist.h>


@ -1,15 +1,182 @@
/*
* Helper functions for scsw access.
*
* Copyright IBM Corp. 2008
* Copyright IBM Corp. 2008,2009
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#ifndef _ASM_S390_SCSW_H_
#define _ASM_S390_SCSW_H_
#include <linux/types.h>
#include <linux/module.h>
#include <asm/chsc.h>
#include <asm/cio.h>
#include "css.h"
#include "chsc.h"
/**
* struct cmd_scsw - command-mode subchannel status word
* @key: subchannel key
* @sctl: suspend control
* @eswf: esw format
* @cc: deferred condition code
* @fmt: format
* @pfch: prefetch
* @isic: initial-status interruption control
* @alcc: address-limit checking control
* @ssi: suppress-suspended interruption
* @zcc: zero condition code
* @ectl: extended control
* @pno: path not operational
* @res: reserved
* @fctl: function control
* @actl: activity control
* @stctl: status control
* @cpa: channel program address
* @dstat: device status
* @cstat: subchannel status
* @count: residual count
*/
struct cmd_scsw {
__u32 key : 4;
__u32 sctl : 1;
__u32 eswf : 1;
__u32 cc : 2;
__u32 fmt : 1;
__u32 pfch : 1;
__u32 isic : 1;
__u32 alcc : 1;
__u32 ssi : 1;
__u32 zcc : 1;
__u32 ectl : 1;
__u32 pno : 1;
__u32 res : 1;
__u32 fctl : 3;
__u32 actl : 7;
__u32 stctl : 5;
__u32 cpa;
__u32 dstat : 8;
__u32 cstat : 8;
__u32 count : 16;
} __attribute__ ((packed));
/**
* struct tm_scsw - transport-mode subchannel status word
* @key: subchannel key
* @eswf: esw format
* @cc: deferred condition code
* @fmt: format
* @x: IRB-format control
* @q: interrogate-complete
* @ectl: extended control
* @pno: path not operational
* @fctl: function control
* @actl: activity control
* @stctl: status control
* @tcw: TCW address
* @dstat: device status
* @cstat: subchannel status
* @fcxs: FCX status
* @schxs: subchannel-extended status
*/
struct tm_scsw {
u32 key:4;
u32 :1;
u32 eswf:1;
u32 cc:2;
u32 fmt:3;
u32 x:1;
u32 q:1;
u32 :1;
u32 ectl:1;
u32 pno:1;
u32 :1;
u32 fctl:3;
u32 actl:7;
u32 stctl:5;
u32 tcw;
u32 dstat:8;
u32 cstat:8;
u32 fcxs:8;
u32 schxs:8;
} __attribute__ ((packed));
/**
* union scsw - subchannel status word
* @cmd: command-mode SCSW
* @tm: transport-mode SCSW
*/
union scsw {
struct cmd_scsw cmd;
struct tm_scsw tm;
} __attribute__ ((packed));
#define SCSW_FCTL_CLEAR_FUNC 0x1
#define SCSW_FCTL_HALT_FUNC 0x2
#define SCSW_FCTL_START_FUNC 0x4
#define SCSW_ACTL_SUSPENDED 0x1
#define SCSW_ACTL_DEVACT 0x2
#define SCSW_ACTL_SCHACT 0x4
#define SCSW_ACTL_CLEAR_PEND 0x8
#define SCSW_ACTL_HALT_PEND 0x10
#define SCSW_ACTL_START_PEND 0x20
#define SCSW_ACTL_RESUME_PEND 0x40
#define SCSW_STCTL_STATUS_PEND 0x1
#define SCSW_STCTL_SEC_STATUS 0x2
#define SCSW_STCTL_PRIM_STATUS 0x4
#define SCSW_STCTL_INTER_STATUS 0x8
#define SCSW_STCTL_ALERT_STATUS 0x10
#define DEV_STAT_ATTENTION 0x80
#define DEV_STAT_STAT_MOD 0x40
#define DEV_STAT_CU_END 0x20
#define DEV_STAT_BUSY 0x10
#define DEV_STAT_CHN_END 0x08
#define DEV_STAT_DEV_END 0x04
#define DEV_STAT_UNIT_CHECK 0x02
#define DEV_STAT_UNIT_EXCEP 0x01
#define SCHN_STAT_PCI 0x80
#define SCHN_STAT_INCORR_LEN 0x40
#define SCHN_STAT_PROG_CHECK 0x20
#define SCHN_STAT_PROT_CHECK 0x10
#define SCHN_STAT_CHN_DATA_CHK 0x08
#define SCHN_STAT_CHN_CTRL_CHK 0x04
#define SCHN_STAT_INTF_CTRL_CHK 0x02
#define SCHN_STAT_CHAIN_CHECK 0x01
/*
* architectured values for first sense byte
*/
#define SNS0_CMD_REJECT 0x80
#define SNS_CMD_REJECT SNS0_CMD_REJEC
#define SNS0_INTERVENTION_REQ 0x40
#define SNS0_BUS_OUT_CHECK 0x20
#define SNS0_EQUIPMENT_CHECK 0x10
#define SNS0_DATA_CHECK 0x08
#define SNS0_OVERRUN 0x04
#define SNS0_INCOMPL_DOMAIN 0x01
/*
* architectured values for second sense byte
*/
#define SNS1_PERM_ERR 0x80
#define SNS1_INV_TRACK_FORMAT 0x40
#define SNS1_EOC 0x20
#define SNS1_MESSAGE_TO_OPER 0x10
#define SNS1_NO_REC_FOUND 0x08
#define SNS1_FILE_PROTECTED 0x04
#define SNS1_WRITE_INHIBITED 0x02
#define SNS1_INPRECISE_END 0x01
/*
* architectured values for third sense byte
*/
#define SNS2_REQ_INH_WRITE 0x80
#define SNS2_CORRECTABLE 0x40
#define SNS2_FIRST_LOG_ERR 0x20
#define SNS2_ENV_DATA_PRESENT 0x10
#define SNS2_INPRECISE_END 0x04
/**
* scsw_is_tm - check for transport mode scsw
@ -18,11 +185,10 @@
* Return non-zero if the specified scsw is a transport mode scsw, zero
* otherwise.
*/
int scsw_is_tm(union scsw *scsw)
static inline int scsw_is_tm(union scsw *scsw)
{
return css_general_characteristics.fcx && (scsw->tm.x == 1);
}
EXPORT_SYMBOL(scsw_is_tm);
/**
* scsw_key - return scsw key field
@ -31,14 +197,13 @@ EXPORT_SYMBOL(scsw_is_tm);
* Return the value of the key field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
u32 scsw_key(union scsw *scsw)
static inline u32 scsw_key(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.key;
else
return scsw->cmd.key;
}
EXPORT_SYMBOL(scsw_key);
/**
* scsw_eswf - return scsw eswf field
@ -47,14 +212,13 @@ EXPORT_SYMBOL(scsw_key);
* Return the value of the eswf field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
u32 scsw_eswf(union scsw *scsw)
static inline u32 scsw_eswf(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.eswf;
else
return scsw->cmd.eswf;
}
EXPORT_SYMBOL(scsw_eswf);
/**
* scsw_cc - return scsw cc field
@ -63,14 +227,13 @@ EXPORT_SYMBOL(scsw_eswf);
* Return the value of the cc field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
u32 scsw_cc(union scsw *scsw)
static inline u32 scsw_cc(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.cc;
else
return scsw->cmd.cc;
}
EXPORT_SYMBOL(scsw_cc);
/**
* scsw_ectl - return scsw ectl field
@ -79,14 +242,13 @@ EXPORT_SYMBOL(scsw_cc);
* Return the value of the ectl field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
u32 scsw_ectl(union scsw *scsw)
static inline u32 scsw_ectl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.ectl;
else
return scsw->cmd.ectl;
}
EXPORT_SYMBOL(scsw_ectl);
/**
* scsw_pno - return scsw pno field
@ -95,14 +257,13 @@ EXPORT_SYMBOL(scsw_ectl);
* Return the value of the pno field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
u32 scsw_pno(union scsw *scsw)
static inline u32 scsw_pno(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.pno;
else
return scsw->cmd.pno;
}
EXPORT_SYMBOL(scsw_pno);
/**
* scsw_fctl - return scsw fctl field
@ -111,14 +272,13 @@ EXPORT_SYMBOL(scsw_pno);
* Return the value of the fctl field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
u32 scsw_fctl(union scsw *scsw)
static inline u32 scsw_fctl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.fctl;
else
return scsw->cmd.fctl;
}
EXPORT_SYMBOL(scsw_fctl);
/**
* scsw_actl - return scsw actl field
@ -127,14 +287,13 @@ EXPORT_SYMBOL(scsw_fctl);
* Return the value of the actl field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
u32 scsw_actl(union scsw *scsw)
static inline u32 scsw_actl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.actl;
else
return scsw->cmd.actl;
}
EXPORT_SYMBOL(scsw_actl);
/**
* scsw_stctl - return scsw stctl field
@ -143,14 +302,13 @@ EXPORT_SYMBOL(scsw_actl);
* Return the value of the stctl field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
u32 scsw_stctl(union scsw *scsw)
static inline u32 scsw_stctl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.stctl;
else
return scsw->cmd.stctl;
}
EXPORT_SYMBOL(scsw_stctl);
/**
* scsw_dstat - return scsw dstat field
@ -159,14 +317,13 @@ EXPORT_SYMBOL(scsw_stctl);
* Return the value of the dstat field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
u32 scsw_dstat(union scsw *scsw)
static inline u32 scsw_dstat(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.dstat;
else
return scsw->cmd.dstat;
}
EXPORT_SYMBOL(scsw_dstat);
/**
* scsw_cstat - return scsw cstat field
@ -175,14 +332,13 @@ EXPORT_SYMBOL(scsw_dstat);
* Return the value of the cstat field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
u32 scsw_cstat(union scsw *scsw)
static inline u32 scsw_cstat(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.cstat;
else
return scsw->cmd.cstat;
}
EXPORT_SYMBOL(scsw_cstat);
/**
* scsw_cmd_is_valid_key - check key field validity
@ -191,11 +347,10 @@ EXPORT_SYMBOL(scsw_cstat);
* Return non-zero if the key field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_key(union scsw *scsw)
static inline int scsw_cmd_is_valid_key(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_key);
/**
* scsw_cmd_is_valid_sctl - check fctl field validity
@ -204,11 +359,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_key);
* Return non-zero if the fctl field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_sctl(union scsw *scsw)
static inline int scsw_cmd_is_valid_sctl(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_sctl);
/**
* scsw_cmd_is_valid_eswf - check eswf field validity
@ -217,11 +371,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_sctl);
* Return non-zero if the eswf field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_eswf(union scsw *scsw)
static inline int scsw_cmd_is_valid_eswf(union scsw *scsw)
{
return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_eswf);
/**
* scsw_cmd_is_valid_cc - check cc field validity
@ -230,12 +383,11 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_eswf);
* Return non-zero if the cc field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_cc(union scsw *scsw)
static inline int scsw_cmd_is_valid_cc(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
(scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_cc);
/**
* scsw_cmd_is_valid_fmt - check fmt field validity
@ -244,11 +396,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_cc);
* Return non-zero if the fmt field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_fmt(union scsw *scsw)
static inline int scsw_cmd_is_valid_fmt(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_fmt);
/**
* scsw_cmd_is_valid_pfch - check pfch field validity
@ -257,11 +408,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_fmt);
* Return non-zero if the pfch field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_pfch(union scsw *scsw)
static inline int scsw_cmd_is_valid_pfch(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_pfch);
/**
* scsw_cmd_is_valid_isic - check isic field validity
@ -270,11 +420,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_pfch);
* Return non-zero if the isic field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_isic(union scsw *scsw)
static inline int scsw_cmd_is_valid_isic(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_isic);
/**
* scsw_cmd_is_valid_alcc - check alcc field validity
@ -283,11 +432,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_isic);
* Return non-zero if the alcc field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_alcc(union scsw *scsw)
static inline int scsw_cmd_is_valid_alcc(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_alcc);
/**
* scsw_cmd_is_valid_ssi - check ssi field validity
@ -296,11 +444,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_alcc);
* Return non-zero if the ssi field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_ssi(union scsw *scsw)
static inline int scsw_cmd_is_valid_ssi(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_ssi);
/**
* scsw_cmd_is_valid_zcc - check zcc field validity
@ -309,12 +456,11 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_ssi);
* Return non-zero if the zcc field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_zcc(union scsw *scsw)
static inline int scsw_cmd_is_valid_zcc(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_zcc);
/**
* scsw_cmd_is_valid_ectl - check ectl field validity
@ -323,13 +469,12 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_zcc);
* Return non-zero if the ectl field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_ectl(union scsw *scsw)
static inline int scsw_cmd_is_valid_ectl(union scsw *scsw)
{
return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
(scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_ectl);
/**
* scsw_cmd_is_valid_pno - check pno field validity
@ -338,7 +483,7 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_ectl);
* Return non-zero if the pno field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_pno(union scsw *scsw)
static inline int scsw_cmd_is_valid_pno(union scsw *scsw)
{
return (scsw->cmd.fctl != 0) &&
(scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
@ -346,7 +491,6 @@ int scsw_cmd_is_valid_pno(union scsw *scsw)
((scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
(scsw->cmd.actl & SCSW_ACTL_SUSPENDED)));
}
EXPORT_SYMBOL(scsw_cmd_is_valid_pno);
/**
* scsw_cmd_is_valid_fctl - check fctl field validity
@ -355,12 +499,11 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_pno);
* Return non-zero if the fctl field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_fctl(union scsw *scsw)
static inline int scsw_cmd_is_valid_fctl(union scsw *scsw)
{
/* Only valid if pmcw.dnv == 1*/
return 1;
}
EXPORT_SYMBOL(scsw_cmd_is_valid_fctl);
/**
* scsw_cmd_is_valid_actl - check actl field validity
@ -369,12 +512,11 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_fctl);
* Return non-zero if the actl field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_actl(union scsw *scsw)
static inline int scsw_cmd_is_valid_actl(union scsw *scsw)
{
/* Only valid if pmcw.dnv == 1*/
return 1;
}
EXPORT_SYMBOL(scsw_cmd_is_valid_actl);
/**
* scsw_cmd_is_valid_stctl - check stctl field validity
@ -383,12 +525,11 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_actl);
* Return non-zero if the stctl field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_stctl(union scsw *scsw)
static inline int scsw_cmd_is_valid_stctl(union scsw *scsw)
{
/* Only valid if pmcw.dnv == 1*/
return 1;
}
EXPORT_SYMBOL(scsw_cmd_is_valid_stctl);
/**
* scsw_cmd_is_valid_dstat - check dstat field validity
@ -397,12 +538,11 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_stctl);
* Return non-zero if the dstat field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_dstat(union scsw *scsw)
static inline int scsw_cmd_is_valid_dstat(union scsw *scsw)
{
return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
(scsw->cmd.cc != 3);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_dstat);
/**
* scsw_cmd_is_valid_cstat - check cstat field validity
@ -411,12 +551,11 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_dstat);
* Return non-zero if the cstat field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_cstat(union scsw *scsw)
static inline int scsw_cmd_is_valid_cstat(union scsw *scsw)
{
return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
(scsw->cmd.cc != 3);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_cstat);
/**
* scsw_tm_is_valid_key - check key field validity
@ -425,11 +564,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_cstat);
* Return non-zero if the key field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_key(union scsw *scsw)
static inline int scsw_tm_is_valid_key(union scsw *scsw)
{
return (scsw->tm.fctl & SCSW_FCTL_START_FUNC);
}
EXPORT_SYMBOL(scsw_tm_is_valid_key);
/**
* scsw_tm_is_valid_eswf - check eswf field validity
@ -438,11 +576,10 @@ EXPORT_SYMBOL(scsw_tm_is_valid_key);
* Return non-zero if the eswf field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_eswf(union scsw *scsw)
static inline int scsw_tm_is_valid_eswf(union scsw *scsw)
{
return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
}
EXPORT_SYMBOL(scsw_tm_is_valid_eswf);
/**
* scsw_tm_is_valid_cc - check cc field validity
@ -451,12 +588,11 @@ EXPORT_SYMBOL(scsw_tm_is_valid_eswf);
* Return non-zero if the cc field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_cc(union scsw *scsw)
static inline int scsw_tm_is_valid_cc(union scsw *scsw)
{
return (scsw->tm.fctl & SCSW_FCTL_START_FUNC) &&
(scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
}
EXPORT_SYMBOL(scsw_tm_is_valid_cc);
/**
* scsw_tm_is_valid_fmt - check fmt field validity
@ -465,11 +601,10 @@ EXPORT_SYMBOL(scsw_tm_is_valid_cc);
* Return non-zero if the fmt field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_fmt(union scsw *scsw)
static inline int scsw_tm_is_valid_fmt(union scsw *scsw)
{
return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_fmt);
/**
* scsw_tm_is_valid_x - check x field validity
@ -478,11 +613,10 @@ EXPORT_SYMBOL(scsw_tm_is_valid_fmt);
* Return non-zero if the x field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_x(union scsw *scsw)
static inline int scsw_tm_is_valid_x(union scsw *scsw)
{
return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_x);
/**
* scsw_tm_is_valid_q - check q field validity
@ -491,11 +625,10 @@ EXPORT_SYMBOL(scsw_tm_is_valid_x);
* Return non-zero if the q field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_q(union scsw *scsw)
static inline int scsw_tm_is_valid_q(union scsw *scsw)
{
return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_q);
/**
* scsw_tm_is_valid_ectl - check ectl field validity
@ -504,13 +637,12 @@ EXPORT_SYMBOL(scsw_tm_is_valid_q);
* Return non-zero if the ectl field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_ectl(union scsw *scsw)
static inline int scsw_tm_is_valid_ectl(union scsw *scsw)
{
return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
(scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS);
}
EXPORT_SYMBOL(scsw_tm_is_valid_ectl);
/**
* scsw_tm_is_valid_pno - check pno field validity
@ -519,7 +651,7 @@ EXPORT_SYMBOL(scsw_tm_is_valid_ectl);
* Return non-zero if the pno field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_pno(union scsw *scsw)
static inline int scsw_tm_is_valid_pno(union scsw *scsw)
{
return (scsw->tm.fctl != 0) &&
(scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
@ -527,7 +659,6 @@ int scsw_tm_is_valid_pno(union scsw *scsw)
((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
(scsw->tm.actl & SCSW_ACTL_SUSPENDED)));
}
EXPORT_SYMBOL(scsw_tm_is_valid_pno);
/**
* scsw_tm_is_valid_fctl - check fctl field validity
@ -536,12 +667,11 @@ EXPORT_SYMBOL(scsw_tm_is_valid_pno);
* Return non-zero if the fctl field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_fctl(union scsw *scsw)
static inline int scsw_tm_is_valid_fctl(union scsw *scsw)
{
/* Only valid if pmcw.dnv == 1*/
return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_fctl);
/**
* scsw_tm_is_valid_actl - check actl field validity
@ -550,12 +680,11 @@ EXPORT_SYMBOL(scsw_tm_is_valid_fctl);
* Return non-zero if the actl field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_actl(union scsw *scsw)
static inline int scsw_tm_is_valid_actl(union scsw *scsw)
{
/* Only valid if pmcw.dnv == 1*/
return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_actl);
/**
* scsw_tm_is_valid_stctl - check stctl field validity
@ -564,12 +693,11 @@ EXPORT_SYMBOL(scsw_tm_is_valid_actl);
* Return non-zero if the stctl field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_stctl(union scsw *scsw)
static inline int scsw_tm_is_valid_stctl(union scsw *scsw)
{
/* Only valid if pmcw.dnv == 1*/
return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_stctl);
/**
* scsw_tm_is_valid_dstat - check dstat field validity
@ -578,12 +706,11 @@ EXPORT_SYMBOL(scsw_tm_is_valid_stctl);
* Return non-zero if the dstat field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_dstat(union scsw *scsw)
static inline int scsw_tm_is_valid_dstat(union scsw *scsw)
{
return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
(scsw->tm.cc != 3);
}
EXPORT_SYMBOL(scsw_tm_is_valid_dstat);
/**
* scsw_tm_is_valid_cstat - check cstat field validity
@ -592,12 +719,11 @@ EXPORT_SYMBOL(scsw_tm_is_valid_dstat);
* Return non-zero if the cstat field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_cstat(union scsw *scsw)
static inline int scsw_tm_is_valid_cstat(union scsw *scsw)
{
return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
(scsw->tm.cc != 3);
}
EXPORT_SYMBOL(scsw_tm_is_valid_cstat);
/**
* scsw_tm_is_valid_fcxs - check fcxs field validity
@ -606,11 +732,10 @@ EXPORT_SYMBOL(scsw_tm_is_valid_cstat);
* Return non-zero if the fcxs field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_fcxs(union scsw *scsw)
static inline int scsw_tm_is_valid_fcxs(union scsw *scsw)
{
return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_fcxs);
/**
* scsw_tm_is_valid_schxs - check schxs field validity
@ -619,14 +744,13 @@ EXPORT_SYMBOL(scsw_tm_is_valid_fcxs);
* Return non-zero if the schxs field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_schxs(union scsw *scsw)
static inline int scsw_tm_is_valid_schxs(union scsw *scsw)
{
return (scsw->tm.cstat & (SCHN_STAT_PROG_CHECK |
SCHN_STAT_INTF_CTRL_CHK |
SCHN_STAT_PROT_CHECK |
SCHN_STAT_CHN_DATA_CHK));
}
EXPORT_SYMBOL(scsw_tm_is_valid_schxs);
/**
* scsw_is_valid_actl - check actl field validity
@ -636,14 +760,13 @@ EXPORT_SYMBOL(scsw_tm_is_valid_schxs);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
int scsw_is_valid_actl(union scsw *scsw)
static inline int scsw_is_valid_actl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_actl(scsw);
else
return scsw_cmd_is_valid_actl(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_actl);
/**
* scsw_is_valid_cc - check cc field validity
@ -653,14 +776,13 @@ EXPORT_SYMBOL(scsw_is_valid_actl);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
int scsw_is_valid_cc(union scsw *scsw)
static inline int scsw_is_valid_cc(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_cc(scsw);
else
return scsw_cmd_is_valid_cc(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_cc);
/**
* scsw_is_valid_cstat - check cstat field validity
@ -670,14 +792,13 @@ EXPORT_SYMBOL(scsw_is_valid_cc);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
int scsw_is_valid_cstat(union scsw *scsw)
static inline int scsw_is_valid_cstat(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_cstat(scsw);
else
return scsw_cmd_is_valid_cstat(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_cstat);
/**
* scsw_is_valid_dstat - check dstat field validity
@ -687,14 +808,13 @@ EXPORT_SYMBOL(scsw_is_valid_cstat);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
int scsw_is_valid_dstat(union scsw *scsw)
static inline int scsw_is_valid_dstat(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_dstat(scsw);
else
return scsw_cmd_is_valid_dstat(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_dstat);
/**
* scsw_is_valid_ectl - check ectl field validity
@ -704,14 +824,13 @@ EXPORT_SYMBOL(scsw_is_valid_dstat);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
int scsw_is_valid_ectl(union scsw *scsw)
static inline int scsw_is_valid_ectl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_ectl(scsw);
else
return scsw_cmd_is_valid_ectl(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_ectl);
/**
* scsw_is_valid_eswf - check eswf field validity
@ -721,14 +840,13 @@ EXPORT_SYMBOL(scsw_is_valid_ectl);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
int scsw_is_valid_eswf(union scsw *scsw)
static inline int scsw_is_valid_eswf(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_eswf(scsw);
else
return scsw_cmd_is_valid_eswf(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_eswf);
/**
* scsw_is_valid_fctl - check fctl field validity
@ -738,14 +856,13 @@ EXPORT_SYMBOL(scsw_is_valid_eswf);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
int scsw_is_valid_fctl(union scsw *scsw)
static inline int scsw_is_valid_fctl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_fctl(scsw);
else
return scsw_cmd_is_valid_fctl(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_fctl);
/**
* scsw_is_valid_key - check key field validity
@ -755,14 +872,13 @@ EXPORT_SYMBOL(scsw_is_valid_fctl);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
int scsw_is_valid_key(union scsw *scsw)
static inline int scsw_is_valid_key(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_key(scsw);
else
return scsw_cmd_is_valid_key(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_key);
/**
* scsw_is_valid_pno - check pno field validity
@ -772,14 +888,13 @@ EXPORT_SYMBOL(scsw_is_valid_key);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
int scsw_is_valid_pno(union scsw *scsw)
static inline int scsw_is_valid_pno(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_pno(scsw);
else
return scsw_cmd_is_valid_pno(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_pno);
/**
* scsw_is_valid_stctl - check stctl field validity
@ -789,14 +904,13 @@ EXPORT_SYMBOL(scsw_is_valid_pno);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
int scsw_is_valid_stctl(union scsw *scsw)
static inline int scsw_is_valid_stctl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_stctl(scsw);
else
return scsw_cmd_is_valid_stctl(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_stctl);
/**
* scsw_cmd_is_solicited - check for solicited scsw
@ -805,12 +919,11 @@ EXPORT_SYMBOL(scsw_is_valid_stctl);
* Return non-zero if the command mode scsw indicates that the associated
* status condition is solicited, zero if it is unsolicited.
*/
int scsw_cmd_is_solicited(union scsw *scsw)
static inline int scsw_cmd_is_solicited(union scsw *scsw)
{
return (scsw->cmd.cc != 0) || (scsw->cmd.stctl !=
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
}
EXPORT_SYMBOL(scsw_cmd_is_solicited);
/**
* scsw_tm_is_solicited - check for solicited scsw
@ -819,12 +932,11 @@ EXPORT_SYMBOL(scsw_cmd_is_solicited);
* Return non-zero if the transport mode scsw indicates that the associated
* status condition is solicited, zero if it is unsolicited.
*/
int scsw_tm_is_solicited(union scsw *scsw)
static inline int scsw_tm_is_solicited(union scsw *scsw)
{
return (scsw->tm.cc != 0) || (scsw->tm.stctl !=
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
}
EXPORT_SYMBOL(scsw_tm_is_solicited);
/**
* scsw_is_solicited - check for solicited scsw
@ -833,11 +945,12 @@ EXPORT_SYMBOL(scsw_tm_is_solicited);
* Return non-zero if the transport or command mode scsw indicates that the
* associated status condition is solicited, zero if it is unsolicited.
*/
int scsw_is_solicited(union scsw *scsw)
static inline int scsw_is_solicited(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_solicited(scsw);
else
return scsw_cmd_is_solicited(scsw);
}
EXPORT_SYMBOL(scsw_is_solicited);
#endif /* _ASM_S390_SCSW_H_ */


@ -8,7 +8,7 @@
#ifndef _ASM_S390_SETUP_H
#define _ASM_S390_SETUP_H
#define COMMAND_LINE_SIZE 1024
#define COMMAND_LINE_SIZE 4096
#define ARCH_COMMAND_LINE_SIZE 896


@ -51,32 +51,7 @@ extern void machine_power_off_smp(void);
#define PROC_CHANGE_PENALTY 20 /* Schedule penalty */
#define raw_smp_processor_id() (S390_lowcore.cpu_nr)
/*
* returns 1 if cpu is in stopped/check stopped state or not operational
* returns 0 otherwise
*/
static inline int
smp_cpu_not_running(int cpu)
{
__u32 status;
switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) {
case sigp_order_code_accepted:
case sigp_status_stored:
/* Check for stopped and check stop state */
if (status & 0x50)
return 1;
break;
case sigp_not_operational:
return 1;
default:
break;
}
return 0;
}
#define cpu_logical_map(cpu) (cpu)
#define cpu_logical_map(cpu) (cpu)
extern int __cpu_disable (void);
extern void __cpu_die (unsigned int cpu);
@ -91,11 +66,6 @@ extern void arch_send_call_function_ipi(cpumask_t mask);
#endif
#ifndef CONFIG_SMP
#define hard_smp_processor_id() 0
#define smp_cpu_not_running(cpu) 1
#endif
#ifdef CONFIG_HOTPLUG_CPU
extern int smp_rescan_cpus(void);
#else


@ -109,11 +109,7 @@ extern void pfault_fini(void);
#define pfault_fini() do { } while (0)
#endif /* CONFIG_PFAULT */
#ifdef CONFIG_PAGE_STATES
extern void cmma_init(void);
#else
static inline void cmma_init(void) { }
#endif
#define finish_arch_switch(prev) do { \
set_fs(current->thread.mm_segment); \


@ -90,4 +90,18 @@ unsigned long long monotonic_clock(void);
extern u64 sched_clock_base_cc;
/**
* get_clock_monotonic - returns current time in clock rate units
*
* The caller must ensure that preemption is disabled.
* The clock and sched_clock_base get changed via stop_machine.
* Therefore preemption must be disabled when calling this
* function, otherwise the returned value is not guaranteed to
* be monotonic.
*/
static inline unsigned long long get_clock_monotonic(void)
{
return get_clock_xt() - sched_clock_base_cc;
}
#endif


@ -32,7 +32,7 @@ extra-y += head.o init_task.o vmlinux.lds
obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
obj-$(CONFIG_SMP) += smp.o topology.o
obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o
obj-$(CONFIG_AUDIT) += audit.o
compat-obj-$(CONFIG_AUDIT) += compat_audit.o
obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \
@ -41,7 +41,7 @@ obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
obj-$(CONFIG_FUNCTION_TRACER) += $(if $(CONFIG_64BIT),mcount64.o,mcount.o)
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o


@ -6,6 +6,9 @@
* Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/errno.h>
@ -16,6 +19,7 @@
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <asm/ebcdic.h>
#include <asm/ipl.h>
#include <asm/lowcore.h>
@ -35,8 +39,6 @@
char kernel_nss_name[NSS_NAME_SIZE + 1];
static unsigned long machine_flags;
static void __init setup_boot_command_line(void);
/*
@ -81,6 +83,8 @@ asm(
" br 14\n"
" .size savesys_ipl_nss, .-savesys_ipl_nss\n");
static __initdata char upper_command_line[COMMAND_LINE_SIZE];
static noinline __init void create_kernel_nss(void)
{
unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
@ -90,7 +94,6 @@ static noinline __init void create_kernel_nss(void)
int response;
size_t len;
char *savesys_ptr;
char upper_command_line[COMMAND_LINE_SIZE];
char defsys_cmd[DEFSYS_CMD_SIZE];
char savesys_cmd[SAVESYS_CMD_SIZE];
@ -141,6 +144,8 @@ static noinline __init void create_kernel_nss(void)
__cpcmd(defsys_cmd, NULL, 0, &response);
if (response != 0) {
pr_err("Defining the Linux kernel NSS failed with rc=%d\n",
response);
kernel_nss_name[0] = '\0';
return;
}
@ -153,8 +158,11 @@ static noinline __init void create_kernel_nss(void)
* max SAVESYS_CMD_SIZE
* On error: response contains the numeric portion of cp error message.
* for SAVESYS it will be >= 263
* for missing privilege class, it will be 1
*/
if (response > SAVESYS_CMD_SIZE) {
if (response > SAVESYS_CMD_SIZE || response == 1) {
pr_err("Saving the Linux kernel NSS failed with rc=%d\n",
response);
kernel_nss_name[0] = '\0';
return;
}
@ -205,12 +213,9 @@ static noinline __init void detect_machine_type(void)
/* Running under KVM? If not we assume z/VM */
if (!memcmp(vmms.vm[0].cpi, "\xd2\xe5\xd4", 3))
machine_flags |= MACHINE_FLAG_KVM;
S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
else
machine_flags |= MACHINE_FLAG_VM;
/* Store machine flags for setting up lowcore early */
S390_lowcore.machine_flags = machine_flags;
S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
}
static __init void early_pgm_check_handler(void)
@ -245,7 +250,7 @@ static noinline __init void setup_hpage(void)
facilities = stfl();
if (!(facilities & (1UL << 23)) || !(facilities & (1UL << 29)))
return;
machine_flags |= MACHINE_FLAG_HPAGE;
S390_lowcore.machine_flags |= MACHINE_FLAG_HPAGE;
__ctl_set_bit(0, 23);
#endif
}
@ -263,7 +268,7 @@ static __init void detect_mvpg(void)
EX_TABLE(0b,1b)
: "=d" (rc) : "0" (-EOPNOTSUPP), "a" (0) : "memory", "cc", "0");
if (!rc)
machine_flags |= MACHINE_FLAG_MVPG;
S390_lowcore.machine_flags |= MACHINE_FLAG_MVPG;
#endif
}
@ -279,7 +284,7 @@ static __init void detect_ieee(void)
EX_TABLE(0b,1b)
: "=d" (rc), "=d" (tmp): "0" (-EOPNOTSUPP) : "cc");
if (!rc)
machine_flags |= MACHINE_FLAG_IEEE;
S390_lowcore.machine_flags |= MACHINE_FLAG_IEEE;
#endif
}
@ -298,7 +303,7 @@ static __init void detect_csp(void)
EX_TABLE(0b,1b)
: "=d" (rc) : "0" (-EOPNOTSUPP) : "cc", "0", "1", "2");
if (!rc)
machine_flags |= MACHINE_FLAG_CSP;
S390_lowcore.machine_flags |= MACHINE_FLAG_CSP;
#endif
}
@ -315,7 +320,7 @@ static __init void detect_diag9c(void)
EX_TABLE(0b,1b)
: "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc");
if (!rc)
machine_flags |= MACHINE_FLAG_DIAG9C;
S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C;
}
static __init void detect_diag44(void)
@ -330,7 +335,7 @@ static __init void detect_diag44(void)
EX_TABLE(0b,1b)
: "=d" (rc) : "0" (-EOPNOTSUPP) : "cc");
if (!rc)
machine_flags |= MACHINE_FLAG_DIAG44;
S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44;
#endif
}
@ -341,11 +346,11 @@ static __init void detect_machine_facilities(void)
facilities = stfl();
if (facilities & (1 << 28))
machine_flags |= MACHINE_FLAG_IDTE;
S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
if (facilities & (1 << 23))
machine_flags |= MACHINE_FLAG_PFMF;
S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF;
if (facilities & (1 << 4))
machine_flags |= MACHINE_FLAG_MVCOS;
S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
#endif
}
@ -367,21 +372,35 @@ static __init void rescue_initrd(void)
}
/* Set up boot command line */
static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t))
{
char *parm, *delim;
size_t rc, len;
len = strlen(boot_command_line);
delim = boot_command_line + len; /* '\0' character position */
parm = boot_command_line + len + 1; /* append right after '\0' */
rc = ipl_data(parm, COMMAND_LINE_SIZE - len - 1);
if (rc) {
if (*parm == '=')
memmove(boot_command_line, parm + 1, rc);
else
*delim = ' '; /* replace '\0' with space */
}
}
static void __init setup_boot_command_line(void)
{
char *parm = NULL;
/* copy arch command line */
strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
/* append IPL PARM data to the boot command line */
if (MACHINE_IS_VM) {
parm = boot_command_line + strlen(boot_command_line);
*parm++ = ' ';
get_ipl_vmparm(parm);
if (parm[0] == '=')
memmove(boot_command_line, parm + 1, strlen(parm));
}
if (MACHINE_IS_VM)
append_to_cmdline(append_ipl_vmparm);
append_to_cmdline(append_ipl_scpdata);
}
@ -413,7 +432,6 @@ void __init startup_init(void)
setup_hpage();
sclp_facilities_detect();
detect_memory_layout(memory_chunk);
S390_lowcore.machine_flags = machine_flags;
#ifdef CONFIG_DYNAMIC_FTRACE
S390_lowcore.ftrace_func = (unsigned long)ftrace_caller;
#endif


@ -278,7 +278,8 @@ sysc_return:
bnz BASED(sysc_work) # there is work to do (signals etc.)
sysc_restore:
#ifdef CONFIG_TRACE_IRQFLAGS
la %r1,BASED(sysc_restore_trace_psw)
la %r1,BASED(sysc_restore_trace_psw_addr)
l %r1,0(%r1)
lpsw 0(%r1)
sysc_restore_trace:
TRACE_IRQS_CHECK
@ -289,10 +290,15 @@ sysc_leave:
sysc_done:
#ifdef CONFIG_TRACE_IRQFLAGS
sysc_restore_trace_psw_addr:
.long sysc_restore_trace_psw
.section .data,"aw",@progbits
.align 8
.globl sysc_restore_trace_psw
sysc_restore_trace_psw:
.long 0, sysc_restore_trace + 0x80000000
.previous
#endif
#
@ -606,7 +612,8 @@ io_return:
bnz BASED(io_work) # there is work to do (signals etc.)
io_restore:
#ifdef CONFIG_TRACE_IRQFLAGS
la %r1,BASED(io_restore_trace_psw)
la %r1,BASED(io_restore_trace_psw_addr)
l %r1,0(%r1)
lpsw 0(%r1)
io_restore_trace:
TRACE_IRQS_CHECK
@ -617,10 +624,15 @@ io_leave:
io_done:
#ifdef CONFIG_TRACE_IRQFLAGS
io_restore_trace_psw_addr:
.long io_restore_trace_psw
.section .data,"aw",@progbits
.align 8
.globl io_restore_trace_psw
io_restore_trace_psw:
.long 0, io_restore_trace + 0x80000000
.previous
#endif
#


@ -284,10 +284,12 @@ sysc_leave:
sysc_done:
#ifdef CONFIG_TRACE_IRQFLAGS
.section .data,"aw",@progbits
.align 8
.globl sysc_restore_trace_psw
sysc_restore_trace_psw:
.quad 0, sysc_restore_trace
.previous
#endif
#
@ -595,10 +597,12 @@ io_leave:
io_done:
#ifdef CONFIG_TRACE_IRQFLAGS
.section .data,"aw",@progbits
.align 8
.globl io_restore_trace_psw
io_restore_trace_psw:
.quad 0, io_restore_trace
.previous
#endif
#


@ -27,6 +27,7 @@
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/cpu.h>
#ifdef CONFIG_64BIT
#define ARCH_OFFSET 4


@ -24,6 +24,7 @@ startup_continue:
# Setup stack
#
l %r15,.Linittu-.LPG1(%r13)
st %r15,__LC_THREAD_INFO # cache thread info in lowcore
mvc __LC_CURRENT(4),__TI_task(%r15)
ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE
st %r15,__LC_KERNEL_STACK # set end of kernel stack


@ -62,9 +62,9 @@ startup_continue:
clr %r11,%r12
je 5f # no more space in prefix array
4:
ahi %r8,1 # next cpu (r8 += 1)
cl %r8,.Llast_cpu-.LPG1(%r13) # is last possible cpu ?
jl 1b # jump if not last cpu
ahi %r8,1 # next cpu (r8 += 1)
chi %r8,MAX_CPU_ADDRESS # is last possible cpu ?
jle 1b # jump if not last cpu
5:
lhi %r1,2 # mode 2 = esame (dump)
j 6f
@ -92,6 +92,7 @@ startup_continue:
# Setup stack
#
larl %r15,init_thread_union
stg %r15,__LC_THREAD_INFO # cache thread info in lowcore
lg %r14,__TI_task(%r15) # cache current in lowcore
stg %r14,__LC_CURRENT
aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
@ -129,8 +130,6 @@ startup_continue:
#ifdef CONFIG_ZFCPDUMP
.Lcurrent_cpu:
.long 0x0
.Llast_cpu:
.long 0x0000ffff
.Lpref_arr_ptr:
.long zfcpdump_prefix_array
#endif /* CONFIG_ZFCPDUMP */


@ -272,17 +272,18 @@ static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr,
static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
/* VM IPL PARM routines */
static void reipl_get_ascii_vmparm(char *dest,
size_t reipl_get_ascii_vmparm(char *dest, size_t size,
const struct ipl_parameter_block *ipb)
{
int i;
int len = 0;
size_t len;
char has_lowercase = 0;
len = 0;
if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) &&
(ipb->ipl_info.ccw.vm_parm_len > 0)) {
len = ipb->ipl_info.ccw.vm_parm_len;
len = min_t(size_t, size - 1, ipb->ipl_info.ccw.vm_parm_len);
memcpy(dest, ipb->ipl_info.ccw.vm_parm, len);
/* If at least one character is lowercase, we assume mixed
* case; otherwise we convert everything to lowercase.
@ -299,14 +300,20 @@ static void reipl_get_ascii_vmparm(char *dest,
EBCASC(dest, len);
}
dest[len] = 0;
return len;
}
void get_ipl_vmparm(char *dest)
size_t append_ipl_vmparm(char *dest, size_t size)
{
size_t rc;
rc = 0;
if (diag308_set_works && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW))
reipl_get_ascii_vmparm(dest, &ipl_block);
rc = reipl_get_ascii_vmparm(dest, size, &ipl_block);
else
dest[0] = 0;
return rc;
}
static ssize_t ipl_vm_parm_show(struct kobject *kobj,
@ -314,10 +321,65 @@ static ssize_t ipl_vm_parm_show(struct kobject *kobj,
{
char parm[DIAG308_VMPARM_SIZE + 1] = {};
get_ipl_vmparm(parm);
append_ipl_vmparm(parm, sizeof(parm));
return sprintf(page, "%s\n", parm);
}
static size_t scpdata_length(const char* buf, size_t count)
{
while (count) {
if (buf[count - 1] != '\0' && buf[count - 1] != ' ')
break;
count--;
}
return count;
}
size_t reipl_append_ascii_scpdata(char *dest, size_t size,
const struct ipl_parameter_block *ipb)
{
size_t count;
size_t i;
int has_lowercase;
count = min(size - 1, scpdata_length(ipb->ipl_info.fcp.scp_data,
ipb->ipl_info.fcp.scp_data_len));
if (!count)
goto out;
has_lowercase = 0;
for (i = 0; i < count; i++) {
if (!isascii(ipb->ipl_info.fcp.scp_data[i])) {
count = 0;
goto out;
}
if (!has_lowercase && islower(ipb->ipl_info.fcp.scp_data[i]))
has_lowercase = 1;
}
if (has_lowercase)
memcpy(dest, ipb->ipl_info.fcp.scp_data, count);
else
for (i = 0; i < count; i++)
dest[i] = tolower(ipb->ipl_info.fcp.scp_data[i]);
out:
dest[count] = '\0';
return count;
}
size_t append_ipl_scpdata(char *dest, size_t len)
{
size_t rc;
rc = 0;
if (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_FCP)
rc = reipl_append_ascii_scpdata(dest, len, &ipl_block);
else
dest[0] = 0;
return rc;
}
static struct kobj_attribute sys_ipl_vm_parm_attr =
__ATTR(parm, S_IRUGO, ipl_vm_parm_show, NULL);
@ -553,7 +615,7 @@ static ssize_t reipl_generic_vmparm_show(struct ipl_parameter_block *ipb,
{
char vmparm[DIAG308_VMPARM_SIZE + 1] = {};
reipl_get_ascii_vmparm(vmparm, ipb);
reipl_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb);
return sprintf(page, "%s\n", vmparm);
}
@ -626,6 +688,59 @@ static struct kobj_attribute sys_reipl_ccw_vmparm_attr =
/* FCP reipl device attributes */
static ssize_t reipl_fcp_scpdata_read(struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
size_t size = reipl_block_fcp->ipl_info.fcp.scp_data_len;
void *scp_data = reipl_block_fcp->ipl_info.fcp.scp_data;
return memory_read_from_buffer(buf, count, &off, scp_data, size);
}
static ssize_t reipl_fcp_scpdata_write(struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
size_t padding;
size_t scpdata_len;
if (off < 0)
return -EINVAL;
if (off >= DIAG308_SCPDATA_SIZE)
return -ENOSPC;
if (count > DIAG308_SCPDATA_SIZE - off)
count = DIAG308_SCPDATA_SIZE - off;
memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf + off, count);
scpdata_len = off + count;
if (scpdata_len % 8) {
padding = 8 - (scpdata_len % 8);
memset(reipl_block_fcp->ipl_info.fcp.scp_data + scpdata_len,
0, padding);
scpdata_len += padding;
}
reipl_block_fcp->ipl_info.fcp.scp_data_len = scpdata_len;
reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN + scpdata_len;
reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN + scpdata_len;
return count;
}
static struct bin_attribute sys_reipl_fcp_scp_data_attr = {
.attr = {
.name = "scp_data",
.mode = S_IRUGO | S_IWUSR,
},
.size = PAGE_SIZE,
.read = reipl_fcp_scpdata_read,
.write = reipl_fcp_scpdata_write,
};
DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n",
reipl_block_fcp->ipl_info.fcp.wwpn);
DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n",
@ -647,7 +762,6 @@ static struct attribute *reipl_fcp_attrs[] = {
};
static struct attribute_group reipl_fcp_attr_group = {
.name = IPL_FCP_STR,
.attrs = reipl_fcp_attrs,
};
@ -895,6 +1009,7 @@ static struct kobj_attribute reipl_type_attr =
__ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store);
static struct kset *reipl_kset;
static struct kset *reipl_fcp_kset;
static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb,
const enum ipl_method m)
@ -906,7 +1021,7 @@ static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb,
reipl_get_ascii_loadparm(loadparm, ipb);
reipl_get_ascii_nss_name(nss_name, ipb);
reipl_get_ascii_vmparm(vmparm, ipb);
reipl_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb);
switch (m) {
case REIPL_METHOD_CCW_VM:
@ -1076,23 +1191,44 @@ static int __init reipl_fcp_init(void)
int rc;
if (!diag308_set_works) {
if (ipl_info.type == IPL_TYPE_FCP)
if (ipl_info.type == IPL_TYPE_FCP) {
make_attrs_ro(reipl_fcp_attrs);
else
sys_reipl_fcp_scp_data_attr.attr.mode = S_IRUGO;
} else
return 0;
}
reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
if (!reipl_block_fcp)
return -ENOMEM;
rc = sysfs_create_group(&reipl_kset->kobj, &reipl_fcp_attr_group);
/* sysfs: create fcp kset for mixing attr group and bin attrs */
reipl_fcp_kset = kset_create_and_add(IPL_FCP_STR, NULL,
&reipl_kset->kobj);
if (!reipl_kset) {
free_page((unsigned long) reipl_block_fcp);
return -ENOMEM;
}
rc = sysfs_create_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group);
if (rc) {
free_page((unsigned long)reipl_block_fcp);
kset_unregister(reipl_fcp_kset);
free_page((unsigned long) reipl_block_fcp);
return rc;
}
if (ipl_info.type == IPL_TYPE_FCP) {
rc = sysfs_create_bin_file(&reipl_fcp_kset->kobj,
&sys_reipl_fcp_scp_data_attr);
if (rc) {
sysfs_remove_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group);
kset_unregister(reipl_fcp_kset);
free_page((unsigned long) reipl_block_fcp);
return rc;
}
if (ipl_info.type == IPL_TYPE_FCP)
memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE);
} else {
else {
reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN;
reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION;
reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN;


@ -11,111 +11,27 @@
ftrace_stub:
br %r14
#ifdef CONFIG_64BIT
#ifdef CONFIG_DYNAMIC_FTRACE
.globl _mcount
_mcount:
br %r14
.globl ftrace_caller
ftrace_caller:
larl %r1,function_trace_stop
icm %r1,0xf,0(%r1)
bnzr %r14
stmg %r2,%r5,32(%r15)
stg %r14,112(%r15)
lgr %r1,%r15
aghi %r15,-160
stg %r1,__SF_BACKCHAIN(%r15)
lgr %r2,%r14
lg %r3,168(%r15)
larl %r14,ftrace_dyn_func
lg %r14,0(%r14)
basr %r14,%r14
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_caller
ftrace_graph_caller:
# This unconditional branch gets runtime patched. Change only if
# you know what you are doing. See ftrace_enable_graph_caller().
j 0f
lg %r2,272(%r15)
lg %r3,168(%r15)
brasl %r14,prepare_ftrace_return
stg %r2,168(%r15)
0:
#endif
aghi %r15,160
lmg %r2,%r5,32(%r15)
lg %r14,112(%r15)
#ifdef CONFIG_DYNAMIC_FTRACE
br %r14
.data
.globl ftrace_dyn_func
ftrace_dyn_func:
.quad ftrace_stub
.long ftrace_stub
.previous
#else /* CONFIG_DYNAMIC_FTRACE */
.globl _mcount
_mcount:
larl %r1,function_trace_stop
icm %r1,0xf,0(%r1)
bnzr %r14
stmg %r2,%r5,32(%r15)
stg %r14,112(%r15)
lgr %r1,%r15
aghi %r15,-160
stg %r1,__SF_BACKCHAIN(%r15)
lgr %r2,%r14
lg %r3,168(%r15)
larl %r14,ftrace_trace_function
lg %r14,0(%r14)
basr %r14,%r14
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
lg %r2,272(%r15)
lg %r3,168(%r15)
brasl %r14,prepare_ftrace_return
stg %r2,168(%r15)
#endif
aghi %r15,160
lmg %r2,%r5,32(%r15)
lg %r14,112(%r15)
br %r14
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl return_to_handler
return_to_handler:
stmg %r2,%r5,32(%r15)
lgr %r1,%r15
aghi %r15,-160
stg %r1,__SF_BACKCHAIN(%r15)
brasl %r14,ftrace_return_to_handler
aghi %r15,160
lgr %r14,%r2
lmg %r2,%r5,32(%r15)
br %r14
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#else /* CONFIG_64BIT */
#ifdef CONFIG_DYNAMIC_FTRACE
.globl _mcount
_mcount:
br %r14
.globl ftrace_caller
ftrace_caller:
#endif
stm %r2,%r5,16(%r15)
bras %r1,2f
#ifdef CONFIG_DYNAMIC_FTRACE
0: .long ftrace_dyn_func
#else
0: .long ftrace_trace_function
#endif
1: .long function_trace_stop
2: l %r2,1b-0b(%r1)
icm %r2,0xf,0(%r2)
@ -131,11 +47,13 @@ ftrace_caller:
l %r14,0(%r14)
basr %r14,%r14
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
.globl ftrace_graph_caller
ftrace_graph_caller:
# This unconditional branch gets runtime patched. Change only if
# you know what you are doing. See ftrace_enable_graph_caller().
j 1f
#endif
bras %r1,0f
.long prepare_ftrace_return
0: l %r2,152(%r15)
@ -150,49 +68,6 @@ ftrace_graph_caller:
3: lm %r2,%r5,16(%r15)
br %r14
.data
.globl ftrace_dyn_func
ftrace_dyn_func:
.long ftrace_stub
.previous
#else /* CONFIG_DYNAMIC_FTRACE */
.globl _mcount
_mcount:
stm %r2,%r5,16(%r15)
bras %r1,2f
0: .long ftrace_trace_function
1: .long function_trace_stop
2: l %r2,1b-0b(%r1)
icm %r2,0xf,0(%r2)
jnz 3f
st %r14,56(%r15)
lr %r0,%r15
ahi %r15,-96
l %r3,100(%r15)
la %r2,0(%r14)
st %r0,__SF_BACKCHAIN(%r15)
la %r3,0(%r3)
l %r14,0b-0b(%r1)
l %r14,0(%r14)
basr %r14,%r14
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
bras %r1,0f
.long prepare_ftrace_return
0: l %r2,152(%r15)
l %r4,0(%r1)
l %r3,100(%r15)
basr %r14,%r4
st %r2,100(%r15)
#endif
ahi %r15,96
l %r14,56(%r15)
3: lm %r2,%r5,16(%r15)
br %r14
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl return_to_handler
@ -211,6 +86,4 @@ return_to_handler:
lm %r2,%r5,16(%r15)
br %r14
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_64BIT */
#endif


@ -0,0 +1,78 @@
/*
* Copyright IBM Corp. 2008,2009
*
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
*
*/
#include <asm/asm-offsets.h>
.globl ftrace_stub
ftrace_stub:
br %r14
.globl _mcount
_mcount:
#ifdef CONFIG_DYNAMIC_FTRACE
br %r14
.data
.globl ftrace_dyn_func
ftrace_dyn_func:
.quad ftrace_stub
.previous
.globl ftrace_caller
ftrace_caller:
#endif
larl %r1,function_trace_stop
icm %r1,0xf,0(%r1)
bnzr %r14
stmg %r2,%r5,32(%r15)
stg %r14,112(%r15)
lgr %r1,%r15
aghi %r15,-160
stg %r1,__SF_BACKCHAIN(%r15)
lgr %r2,%r14
lg %r3,168(%r15)
#ifdef CONFIG_DYNAMIC_FTRACE
larl %r14,ftrace_dyn_func
#else
larl %r14,ftrace_trace_function
#endif
lg %r14,0(%r14)
basr %r14,%r14
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
.globl ftrace_graph_caller
ftrace_graph_caller:
# This unconditional branch gets runtime patched. Change only if
# you know what you are doing. See ftrace_enable_graph_caller().
j 0f
#endif
lg %r2,272(%r15)
lg %r3,168(%r15)
brasl %r14,prepare_ftrace_return
stg %r2,168(%r15)
0:
#endif
aghi %r15,160
lmg %r2,%r5,32(%r15)
lg %r14,112(%r15)
br %r14
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl return_to_handler
return_to_handler:
stmg %r2,%r5,32(%r15)
lgr %r1,%r15
aghi %r15,-160
stg %r1,__SF_BACKCHAIN(%r15)
brasl %r14,ftrace_return_to_handler
aghi %r15,160
lgr %r14,%r2
lmg %r2,%r5,32(%r15)
br %r14
#endif


@ -156,15 +156,11 @@ __setup("condev=", condev_setup);
static void __init set_preferred_console(void)
{
if (MACHINE_IS_KVM) {
if (MACHINE_IS_KVM)
add_preferred_console("hvc", 0, NULL);
s390_virtio_console_init();
return;
}
if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
add_preferred_console("ttyS", 0, NULL);
if (CONSOLE_IS_3270)
else if (CONSOLE_IS_3270)
add_preferred_console("tty3270", 0, NULL);
}


@ -49,6 +49,7 @@
#include <asm/sclp.h>
#include <asm/cputime.h>
#include <asm/vdso.h>
#include <asm/cpu.h>
#include "entry.h"
static struct task_struct *current_set[NR_CPUS];
@ -70,6 +71,23 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
static void smp_ext_bitcall(int, ec_bit_sig);
static int cpu_stopped(int cpu)
{
__u32 status;
switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) {
case sigp_order_code_accepted:
case sigp_status_stored:
/* Check for stopped and check stop state */
if (status & 0x50)
return 1;
break;
default:
break;
}
return 0;
}
void smp_send_stop(void)
{
int cpu, rc;
@ -86,7 +104,7 @@ void smp_send_stop(void)
rc = signal_processor(cpu, sigp_stop);
} while (rc == sigp_busy);
while (!smp_cpu_not_running(cpu))
while (!cpu_stopped(cpu))
cpu_relax();
}
}
@ -269,19 +287,6 @@ static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }
#endif /* CONFIG_ZFCPDUMP */
static int cpu_stopped(int cpu)
{
__u32 status;
/* Check for stopped state */
if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
sigp_status_stored) {
if (status & 0x40)
return 1;
}
return 0;
}
static int cpu_known(int cpu_id)
{
int cpu;
@ -300,7 +305,7 @@ static int smp_rescan_cpus_sigp(cpumask_t avail)
logical_cpu = cpumask_first(&avail);
if (logical_cpu >= nr_cpu_ids)
return 0;
for (cpu_id = 0; cpu_id <= 65535; cpu_id++) {
for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) {
if (cpu_known(cpu_id))
continue;
__cpu_logical_map[logical_cpu] = cpu_id;
@ -379,7 +384,7 @@ static void __init smp_detect_cpus(void)
/* Use sigp detection algorithm if sclp doesn't work. */
if (sclp_get_cpu_info(info)) {
smp_use_sigp_detection = 1;
for (cpu = 0; cpu <= 65535; cpu++) {
for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) {
if (cpu == boot_cpu_addr)
continue;
__cpu_logical_map[CPU_INIT_NO] = cpu;
@ -635,7 +640,7 @@ int __cpu_disable(void)
void __cpu_die(unsigned int cpu)
{
/* Wait until target cpu is down */
while (!smp_cpu_not_running(cpu))
while (!cpu_stopped(cpu))
cpu_relax();
smp_free_lowcore(cpu);
pr_info("Processor %d stopped\n", cpu);


@ -1,13 +1,44 @@
/*
* Support for suspend and resume on s390
* Suspend support specific for s390.
*
* Copyright IBM Corp. 2009
*
* Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
*
*/
#include <linux/suspend.h>
#include <linux/reboot.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/ipl.h>
/*
* References to section boundaries
*/
extern const void __nosave_begin, __nosave_end;
/*
* check if given pfn is in the 'nosave' or in the read only NSS section
*/
int pfn_is_nosave(unsigned long pfn)
{
unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end))
>> PAGE_SHIFT;
unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
return 1;
if (pfn >= stext_pfn && pfn <= eshared_pfn) {
if (ipl_info.type == IPL_TYPE_NSS)
return 1;
} else if ((tprot(pfn * PAGE_SIZE) && pfn > 0))
return 1;
return 0;
}
void save_processor_state(void)
{


@ -21,7 +21,7 @@
* This function runs with disabled interrupts.
*/
.section .text
.align 2
.align 4
.globl swsusp_arch_suspend
swsusp_arch_suspend:
stmg %r6,%r15,__SF_GPRS(%r15)


@ -60,6 +60,7 @@
#define TICK_SIZE tick
u64 sched_clock_base_cc = -1; /* Force to data section. */
EXPORT_SYMBOL_GPL(sched_clock_base_cc);
static DEFINE_PER_CPU(struct clock_event_device, comparators);
@ -68,7 +69,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
*/
unsigned long long notrace sched_clock(void)
{
return ((get_clock_xt() - sched_clock_base_cc) * 125) >> 9;
return (get_clock_monotonic() * 125) >> 9;
}
/*


@ -52,55 +52,18 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
_eshared = .; /* End of shareable data */
. = ALIGN(16); /* Exception table */
__ex_table : {
__start___ex_table = .;
*(__ex_table)
__stop___ex_table = .;
} :data
EXCEPTION_TABLE(16) :data
.data : { /* Data */
DATA_DATA
CONSTRUCTORS
}
RW_DATA_SECTION(0x100, PAGE_SIZE, THREAD_SIZE)
. = ALIGN(PAGE_SIZE);
.data_nosave : {
__nosave_begin = .;
*(.data.nosave)
}
. = ALIGN(PAGE_SIZE);
__nosave_end = .;
. = ALIGN(PAGE_SIZE);
.data.page_aligned : {
*(.data.idt)
}
. = ALIGN(0x100);
.data.cacheline_aligned : {
*(.data.cacheline_aligned)
}
. = ALIGN(0x100);
.data.read_mostly : {
*(.data.read_mostly)
}
_edata = .; /* End of data section */
. = ALIGN(THREAD_SIZE); /* init_task */
.data.init_task : {
*(.data.init_task)
}
/* will be freed after init */
. = ALIGN(PAGE_SIZE); /* Init code and data */
__init_begin = .;
.init.text : {
_sinittext = .;
INIT_TEXT
_einittext = .;
}
INIT_TEXT_SECTION(PAGE_SIZE)
/*
* .exit.text is discarded at runtime, not link time,
* to deal with references from __bug_table
@ -111,49 +74,13 @@ SECTIONS
/* early.c uses stsi, which requires page aligned data. */
. = ALIGN(PAGE_SIZE);
.init.data : {
INIT_DATA
}
. = ALIGN(0x100);
.init.setup : {
__setup_start = .;
*(.init.setup)
__setup_end = .;
}
.initcall.init : {
__initcall_start = .;
INITCALLS
__initcall_end = .;
}
.con_initcall.init : {
__con_initcall_start = .;
*(.con_initcall.init)
__con_initcall_end = .;
}
SECURITY_INIT
#ifdef CONFIG_BLK_DEV_INITRD
. = ALIGN(0x100);
.init.ramfs : {
__initramfs_start = .;
*(.init.ramfs)
. = ALIGN(2);
__initramfs_end = .;
}
#endif
INIT_DATA_SECTION(0x100)
PERCPU(PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
__init_end = .; /* freed after init ends here */
/* BSS */
.bss : {
__bss_start = .;
*(.bss)
. = ALIGN(2);
__bss_stop = .;
}
BSS_SECTION(0, 2, 0)
_end = . ;


@ -2,7 +2,7 @@
# Makefile for the linux s390-specific parts of the memory manager.
#
obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o
obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o \
page-states.o
obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PAGE_STATES) += page-states.o


@ -10,6 +10,7 @@
* Copyright (C) 1995 Linus Torvalds
*/
#include <linux/perf_counter.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@ -305,7 +306,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write)
* interrupts again and then search the VMAs
*/
local_irq_enable();
perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
down_read(&mm->mmap_sem);
si_code = SEGV_MAPERR;
@ -363,11 +364,15 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write)
}
BUG();
}
if (fault & VM_FAULT_MAJOR)
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
else
perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
regs, address);
} else {
tsk->min_flt++;
perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
regs, address);
}
up_read(&mm->mmap_sem);
/*
* The instruction that caused the program check will


@ -1,6 +1,4 @@
/*
* arch/s390/mm/page-states.c
*
* Copyright IBM Corp. 2008
*
* Guest page hinting for unused pages.
@ -17,11 +15,12 @@
#define ESSA_SET_STABLE 1
#define ESSA_SET_UNUSED 2
static int cmma_flag;
static int cmma_flag = 1;
static int __init cmma(char *str)
{
char *parm;
parm = strstrip(str);
if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
cmma_flag = 1;
@ -32,7 +31,6 @@ static int __init cmma(char *str)
return 1;
return 0;
}
__setup("cmma=", cmma);
void __init cmma_init(void)


@ -78,9 +78,9 @@ unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
}
page->index = page_to_phys(shadow);
}
spin_lock(&mm->page_table_lock);
spin_lock(&mm->context.list_lock);
list_add(&page->lru, &mm->context.crst_list);
spin_unlock(&mm->page_table_lock);
spin_unlock(&mm->context.list_lock);
return (unsigned long *) page_to_phys(page);
}
@ -89,9 +89,9 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table)
unsigned long *shadow = get_shadow_table(table);
struct page *page = virt_to_page(table);
spin_lock(&mm->page_table_lock);
spin_lock(&mm->context.list_lock);
list_del(&page->lru);
spin_unlock(&mm->page_table_lock);
spin_unlock(&mm->context.list_lock);
if (shadow)
free_pages((unsigned long) shadow, ALLOC_ORDER);
free_pages((unsigned long) table, ALLOC_ORDER);
@ -182,7 +182,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
unsigned long bits;
bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
spin_lock(&mm->page_table_lock);
spin_lock(&mm->context.list_lock);
page = NULL;
if (!list_empty(&mm->context.pgtable_list)) {
page = list_first_entry(&mm->context.pgtable_list,
@ -191,7 +191,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
page = NULL;
}
if (!page) {
spin_unlock(&mm->page_table_lock);
spin_unlock(&mm->context.list_lock);
page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
if (!page)
return NULL;
@ -202,7 +202,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
clear_table_pgstes(table);
else
clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
spin_lock(&mm->page_table_lock);
spin_lock(&mm->context.list_lock);
list_add(&page->lru, &mm->context.pgtable_list);
}
table = (unsigned long *) page_to_phys(page);
@ -213,7 +213,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
page->flags |= bits;
if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
list_move_tail(&page->lru, &mm->context.pgtable_list);
spin_unlock(&mm->page_table_lock);
spin_unlock(&mm->context.list_lock);
return table;
}
@ -225,7 +225,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
spin_lock(&mm->page_table_lock);
spin_lock(&mm->context.list_lock);
page->flags ^= bits;
if (page->flags & FRAG_MASK) {
/* Page now has some free pgtable fragments. */
@ -234,7 +234,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
} else
/* All fragments of the 4K page have been freed. */
list_del(&page->lru);
spin_unlock(&mm->page_table_lock);
spin_unlock(&mm->context.list_lock);
if (page) {
pgtable_page_dtor(page);
__free_page(page);
@ -245,7 +245,7 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
{
struct page *page;
spin_lock(&mm->page_table_lock);
spin_lock(&mm->context.list_lock);
/* Free shadow region and segment tables. */
list_for_each_entry(page, &mm->context.crst_list, lru)
if (page->index) {
@ -255,7 +255,7 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
/* "Free" second halves of page tables. */
list_for_each_entry(page, &mm->context.pgtable_list, lru)
page->flags &= ~SECOND_HALVES;
spin_unlock(&mm->page_table_lock);
spin_unlock(&mm->context.list_lock);
mm->context.noexec = 0;
update_mm(mm, tsk);
}


@ -331,6 +331,7 @@ void __init vmem_map_init(void)
unsigned long start, end;
int i;
spin_lock_init(&init_mm.context.list_lock);
INIT_LIST_HEAD(&init_mm.context.crst_list);
INIT_LIST_HEAD(&init_mm.context.pgtable_list);
init_mm.context.noexec = 0;


@ -1,8 +0,0 @@
#
# Makefile for s390 PM support
#
obj-$(CONFIG_HIBERNATION) += suspend.o
obj-$(CONFIG_HIBERNATION) += swsusp.o
obj-$(CONFIG_HIBERNATION) += swsusp_64.o
obj-$(CONFIG_HIBERNATION) += swsusp_asm64.o


@ -1,40 +0,0 @@
/*
* Suspend support specific for s390.
*
* Copyright IBM Corp. 2009
*
* Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
*/
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/reboot.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/ipl.h>
/*
* References to section boundaries
*/
extern const void __nosave_begin, __nosave_end;
/*
* check if given pfn is in the 'nosave' or in the read only NSS section
*/
int pfn_is_nosave(unsigned long pfn)
{
unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end))
>> PAGE_SHIFT;
unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
return 1;
if (pfn >= stext_pfn && pfn <= eshared_pfn) {
if (ipl_info.type == IPL_TYPE_NSS)
return 1;
} else if ((tprot(pfn * PAGE_SIZE) && pfn > 0))
return 1;
return 0;
}


@ -1,17 +0,0 @@
/*
* Support for suspend and resume on s390
*
* Copyright IBM Corp. 2009
*
* Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
*
*/
#include <asm/system.h>
#include <linux/interrupt.h>
void do_after_copyback(void)
{
mb();
}


@ -1006,7 +1006,7 @@ static int __init hvc_iucv_alloc(int id, unsigned int is_console)
priv->dev->release = (void (*)(struct device *)) kfree;
rc = device_register(priv->dev);
if (rc) {
kfree(priv->dev);
put_device(priv->dev);
goto out_error_dev;
}


@ -669,14 +669,14 @@ static void dasd_profile_end(struct dasd_block *block,
* memory and 2) dasd_smalloc_request uses the static ccw memory
* that gets allocated for each device.
*/
struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength,
struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
int datasize,
struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
/* Sanity checks */
BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
BUG_ON(datasize > PAGE_SIZE ||
(cplength*sizeof(struct ccw1)) > PAGE_SIZE);
cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
@ -700,14 +700,13 @@ struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength,
return ERR_PTR(-ENOMEM);
}
}
strncpy((char *) &cqr->magic, magic, 4);
ASCEBC((char *) &cqr->magic, 4);
cqr->magic = magic;
set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
dasd_get_device(device);
return cqr;
}
struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength,
struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
int datasize,
struct dasd_device *device)
{
@ -717,7 +716,7 @@ struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength,
int size;
/* Sanity checks */
BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
BUG_ON(datasize > PAGE_SIZE ||
(cplength*sizeof(struct ccw1)) > PAGE_SIZE);
size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
@ -744,8 +743,7 @@ struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength,
cqr->data = data;
memset(cqr->data, 0, datasize);
}
strncpy((char *) &cqr->magic, magic, 4);
ASCEBC((char *) &cqr->magic, 4);
cqr->magic = magic;
set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
dasd_get_device(device);
return cqr;
@ -899,9 +897,6 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
switch (rc) {
case 0:
cqr->status = DASD_CQR_IN_IO;
DBF_DEV_EVENT(DBF_DEBUG, device,
"start_IO: request %p started successful",
cqr);
break;
case -EBUSY:
DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
@ -1699,8 +1694,11 @@ static void __dasd_process_request_queue(struct dasd_block *block)
* for that. State DASD_STATE_ONLINE is normal block device
* operation.
*/
if (basedev->state < DASD_STATE_READY)
if (basedev->state < DASD_STATE_READY) {
while ((req = blk_fetch_request(block->request_queue)))
__blk_end_request_all(req, -EIO);
return;
}
/* Now we try to fetch requests from the request queue */
while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
if (basedev->features & DASD_FEATURE_READONLY &&
@ -2530,7 +2528,7 @@ EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
void *rdc_buffer,
int rdc_buffer_size,
char *magic)
int magic)
{
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
@ -2561,7 +2559,7 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
}
int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
void *rdc_buffer, int rdc_buffer_size)
{
int ret;


@ -7,7 +7,7 @@
*
*/
#define KMSG_COMPONENT "dasd"
#define KMSG_COMPONENT "dasd-eckd"
#include <linux/timer.h>
#include <linux/slab.h>


@ -5,7 +5,7 @@
* Author(s): Stefan Weinhuber <wein@de.ibm.com>
*/
#define KMSG_COMPONENT "dasd"
#define KMSG_COMPONENT "dasd-eckd"
#include <linux/list.h>
#include <asm/ebcdic.h>
@ -379,8 +379,7 @@ static int read_unit_address_configuration(struct dasd_device *device,
int rc;
unsigned long flags;
cqr = dasd_kmalloc_request("ECKD",
1 /* PSF */ + 1 /* RSSD */ ,
cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
(sizeof(struct dasd_psf_prssd_data)),
device);
if (IS_ERR(cqr))


@ -8,7 +8,7 @@
*
*/
#define KMSG_COMPONENT "dasd"
#define KMSG_COMPONENT "dasd-diag"
#include <linux/stddef.h>
#include <linux/kernel.h>
@ -523,8 +523,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
/* Build the request */
datasize = sizeof(struct dasd_diag_req) +
count*sizeof(struct dasd_diag_bio);
cqr = dasd_smalloc_request(dasd_diag_discipline.name, 0,
datasize, memdev);
cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev);
if (IS_ERR(cqr))
return cqr;


@ -10,7 +10,7 @@
* Author.........: Nigel Hislop <hislop_nigel@emc.com>
*/
#define KMSG_COMPONENT "dasd"
#define KMSG_COMPONENT "dasd-eckd"
#include <linux/stddef.h>
#include <linux/kernel.h>
@ -730,7 +730,8 @@ static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
cqr = dasd_smalloc_request("ECKD", 1 /* RCD */, ciw->count, device);
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */, ciw->count,
device);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
@ -934,8 +935,7 @@ static int dasd_eckd_read_features(struct dasd_device *device)
struct dasd_eckd_private *private;
private = (struct dasd_eckd_private *) device->private;
cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1 /* PSF */ + 1 /* RSSD */ ,
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
(sizeof(struct dasd_psf_prssd_data) +
sizeof(struct dasd_rssd_features)),
device);
@ -998,7 +998,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
struct dasd_psf_ssc_data *psf_ssc_data;
struct ccw1 *ccw;
cqr = dasd_smalloc_request("ECKD", 1 /* PSF */ ,
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
sizeof(struct dasd_psf_ssc_data),
device);
@ -1149,8 +1149,8 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
goto out_err3;
/* Read Device Characteristics */
rc = dasd_generic_read_dev_chars(device, "ECKD", &private->rdc_data,
64);
rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
&private->rdc_data, 64);
if (rc) {
DBF_EVENT(DBF_WARNING,
"Read device characteristics failed, rc=%d for "
@ -1217,8 +1217,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
cplength = 8;
datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
cplength, datasize, device);
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;
@ -1499,8 +1498,7 @@ dasd_eckd_format_device(struct dasd_device * device,
return ERR_PTR(-EINVAL);
}
/* Allocate the format ccw request. */
fcp = dasd_smalloc_request(dasd_eckd_discipline.name,
cplength, datasize, device);
fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
if (IS_ERR(fcp))
return fcp;
@ -1783,8 +1781,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
datasize += count*sizeof(struct LO_eckd_data);
}
/* Allocate the ccw request. */
cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
cplength, datasize, startdev);
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
startdev);
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;
@ -1948,8 +1946,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
cidaw * sizeof(unsigned long long);
/* Allocate the ccw request. */
cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
cplength, datasize, startdev);
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
startdev);
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;
@ -2249,8 +2247,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
/* Allocate the ccw request. */
itcw_size = itcw_calc_size(0, ctidaw, 0);
cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
0, itcw_size, startdev);
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
if (IS_ERR(cqr))
return cqr;
@ -2557,8 +2554,7 @@ dasd_eckd_release(struct dasd_device *device)
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1, 32, device);
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate initialization request");
@ -2600,8 +2596,7 @@ dasd_eckd_reserve(struct dasd_device *device)
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1, 32, device);
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate initialization request");
@ -2642,8 +2637,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1, 32, device);
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate initialization request");
@ -2681,8 +2675,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
struct ccw1 *ccw;
int rc;
cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1 /* PSF */ + 1 /* RSSD */ ,
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
(sizeof(struct dasd_psf_prssd_data) +
sizeof(struct dasd_rssd_perf_stats_t)),
device);
@ -2828,7 +2821,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
}
/* setup CCWs for PSF + RSSD */
cqr = dasd_smalloc_request("ECKD", 2 , 0, device);
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 , 0, device);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate initialization request");
@ -3254,7 +3247,7 @@ int dasd_eckd_restore_device(struct dasd_device *device)
/* Read Device Characteristics */
memset(&private->rdc_data, 0, sizeof(private->rdc_data));
rc = dasd_generic_read_dev_chars(device, "ECKD",
rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
&private->rdc_data, 64);
if (rc) {
DBF_EVENT(DBF_WARNING,


@ -6,7 +6,7 @@
* Author(s): Stefan Weinhuber <wein@de.ibm.com>
*/
#define KMSG_COMPONENT "dasd"
#define KMSG_COMPONENT "dasd-eckd"
#include <linux/init.h>
#include <linux/fs.h>
@ -464,7 +464,7 @@ int dasd_eer_enable(struct dasd_device *device)
if (!device->discipline || strcmp(device->discipline->name, "ECKD"))
return -EPERM; /* FIXME: -EMEDIUMTYPE ? */
cqr = dasd_kmalloc_request("ECKD", 1 /* SNSS */,
cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
SNSS_DATA_SIZE, device);
if (IS_ERR(cqr))
return -ENOMEM;


@ -99,8 +99,8 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr)
cqr->lpm = LPM_ANYPATH;
cqr->status = DASD_CQR_FILLED;
} else {
dev_err(&device->cdev->dev,
"default ERP has run out of retries and failed\n");
pr_err("%s: default ERP has run out of retries and failed\n",
dev_name(&device->cdev->dev));
cqr->status = DASD_CQR_FAILED;
cqr->stopclk = get_clock();
}


@ -5,7 +5,7 @@
* Copyright IBM Corp. 1999, 2009
*/
#define KMSG_COMPONENT "dasd"
#define KMSG_COMPONENT "dasd-fba"
#include <linux/stddef.h>
#include <linux/kernel.h>
@ -152,8 +152,8 @@ dasd_fba_check_characteristics(struct dasd_device *device)
block->base = device;
/* Read Device Characteristics */
rc = dasd_generic_read_dev_chars(device, "FBA ", &private->rdc_data,
32);
rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC,
&private->rdc_data, 32);
if (rc) {
DBF_EVENT(DBF_WARNING, "Read device characteristics returned "
"error %d for device: %s",
@ -305,8 +305,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
datasize += (count - 1)*sizeof(struct LO_fba_data);
}
/* Allocate the ccw request. */
cqr = dasd_smalloc_request(dasd_fba_discipline.name,
cplength, datasize, memdev);
cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev);
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;


@ -59,6 +59,11 @@
#include <asm/dasd.h>
#include <asm/idals.h>
/* DASD discipline magic */
#define DASD_ECKD_MAGIC 0xC5C3D2C4
#define DASD_DIAG_MAGIC 0xC4C9C1C7
#define DASD_FBA_MAGIC 0xC6C2C140
/*
* SECTION: Type definitions
*/
@ -540,9 +545,9 @@ extern struct block_device_operations dasd_device_operations;
extern struct kmem_cache *dasd_page_cache;
struct dasd_ccw_req *
dasd_kmalloc_request(char *, int, int, struct dasd_device *);
dasd_kmalloc_request(int , int, int, struct dasd_device *);
struct dasd_ccw_req *
dasd_smalloc_request(char *, int, int, struct dasd_device *);
dasd_smalloc_request(int , int, int, struct dasd_device *);
void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *);
void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *);
@ -587,7 +592,7 @@ void dasd_generic_handle_state_change(struct dasd_device *);
int dasd_generic_pm_freeze(struct ccw_device *);
int dasd_generic_restore_device(struct ccw_device *);
int dasd_generic_read_dev_chars(struct dasd_device *, char *, void *, int);
int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
char *dasd_get_sense(struct irb *);
/* externals in dasd_devmap.c */


@ -98,8 +98,8 @@ static int dasd_ioctl_quiesce(struct dasd_block *block)
if (!capable (CAP_SYS_ADMIN))
return -EACCES;
dev_info(&base->cdev->dev, "The DASD has been put in the quiesce "
"state\n");
pr_info("%s: The DASD has been put in the quiesce "
"state\n", dev_name(&base->cdev->dev));
spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
base->stopped |= DASD_STOPPED_QUIESCE;
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
@ -119,8 +119,8 @@ static int dasd_ioctl_resume(struct dasd_block *block)
if (!capable (CAP_SYS_ADMIN))
return -EACCES;
dev_info(&base->cdev->dev, "I/O operations have been resumed "
"on the DASD\n");
pr_info("%s: I/O operations have been resumed "
"on the DASD\n", dev_name(&base->cdev->dev));
spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
base->stopped &= ~DASD_STOPPED_QUIESCE;
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
@ -146,8 +146,8 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata)
return -EPERM;
if (base->state != DASD_STATE_BASIC) {
dev_warn(&base->cdev->dev,
"The DASD cannot be formatted while it is enabled\n");
pr_warning("%s: The DASD cannot be formatted while it is "
"enabled\n", dev_name(&base->cdev->dev));
return -EBUSY;
}
@ -175,9 +175,9 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata)
dasd_sfree_request(cqr, cqr->memdev);
if (rc) {
if (rc != -ERESTARTSYS)
dev_err(&base->cdev->dev,
"Formatting unit %d failed with "
"rc=%d\n", fdata->start_unit, rc);
pr_err("%s: Formatting unit %d failed with "
"rc=%d\n", dev_name(&base->cdev->dev),
fdata->start_unit, rc);
return rc;
}
fdata->start_unit++;
@ -204,9 +204,9 @@ dasd_ioctl_format(struct block_device *bdev, void __user *argp)
if (copy_from_user(&fdata, argp, sizeof(struct format_data_t)))
return -EFAULT;
if (bdev != bdev->bd_contains) {
dev_warn(&block->base->cdev->dev,
"The specified DASD is a partition and cannot be "
"formatted\n");
pr_warning("%s: The specified DASD is a partition and cannot "
"be formatted\n",
dev_name(&block->base->cdev->dev));
return -EINVAL;
}
return dasd_format(block, &fdata);


@ -42,7 +42,6 @@
#include <linux/suspend.h>
#include <linux/platform_device.h>
#include <asm/uaccess.h>
#include <asm/checksum.h>
#define XPRAM_NAME "xpram"
#define XPRAM_DEVS 1 /* one partition */
@ -51,7 +50,6 @@
typedef struct {
unsigned int size; /* size of xpram segment in pages */
unsigned int offset; /* start page of xpram segment */
unsigned int csum; /* partition checksum for suspend */
} xpram_device_t;
static xpram_device_t xpram_devices[XPRAM_MAX_DEVS];
@ -386,58 +384,6 @@ static int __init xpram_setup_blkdev(void)
return rc;
}
/*
* Save checksums for all partitions.
*/
static int xpram_save_checksums(void)
{
unsigned long mem_page;
int rc, i;
rc = 0;
mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
if (!mem_page)
return -ENOMEM;
for (i = 0; i < xpram_devs; i++) {
rc = xpram_page_in(mem_page, xpram_devices[i].offset);
if (rc)
goto fail;
xpram_devices[i].csum = csum_partial((const void *) mem_page,
PAGE_SIZE, 0);
}
fail:
free_page(mem_page);
return rc ? -ENXIO : 0;
}
/*
* Verify checksums for all partitions.
*/
static int xpram_validate_checksums(void)
{
unsigned long mem_page;
unsigned int csum;
int rc, i;
rc = 0;
mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
if (!mem_page)
return -ENOMEM;
for (i = 0; i < xpram_devs; i++) {
rc = xpram_page_in(mem_page, xpram_devices[i].offset);
if (rc)
goto fail;
csum = csum_partial((const void *) mem_page, PAGE_SIZE, 0);
if (xpram_devices[i].csum != csum) {
rc = -EINVAL;
goto fail;
}
}
fail:
free_page(mem_page);
return rc ? -ENXIO : 0;
}
/*
* Resume failed: Print error message and call panic.
*/
@ -458,21 +404,10 @@ static int xpram_restore(struct device *dev)
xpram_resume_error("xpram disappeared");
if (xpram_pages != xpram_highest_page_index() + 1)
xpram_resume_error("Size of xpram changed");
if (xpram_validate_checksums())
xpram_resume_error("Data of xpram changed");
return 0;
}
/*
* Save necessary state in suspend.
*/
static int xpram_freeze(struct device *dev)
{
return xpram_save_checksums();
}
static struct dev_pm_ops xpram_pm_ops = {
.freeze = xpram_freeze,
.restore = xpram_restore,
};


@ -82,6 +82,16 @@ config SCLP_CPI
You should only select this option if you know what you are doing,
need this feature and intend to run your kernel in LPAR.
config SCLP_ASYNC
tristate "Support for Call Home via Asynchronous SCLP Records"
depends on S390
help
This option enables the call home function, which is able to inform
the service element and connected organisations about a kernel panic.
You should only select this option if you know what you are doing,
want to inform other people about your kernel panics,
need this feature and intend to run your kernel in LPAR.
config S390_TAPE
tristate "S/390 tape device support"
depends on CCW


@ -16,6 +16,7 @@ obj-$(CONFIG_SCLP_TTY) += sclp_tty.o
obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
obj-$(CONFIG_SCLP_CPI) += sclp_cpi.o
obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o
obj-$(CONFIG_ZVM_WATCHDOG) += vmwatchdog.o
obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o


@ -581,7 +581,7 @@ static int __init mon_init(void)
monreader_device->release = (void (*)(struct device *))kfree;
rc = device_register(monreader_device);
if (rc) {
kfree(monreader_device);
put_device(monreader_device);
goto out_driver;
}


@ -27,6 +27,7 @@
#define EVTYP_VT220MSG 0x1A
#define EVTYP_CONFMGMDATA 0x04
#define EVTYP_SDIAS 0x1C
#define EVTYP_ASYNC 0x0A
#define EVTYP_OPCMD_MASK 0x80000000
#define EVTYP_MSG_MASK 0x40000000
@ -38,6 +39,7 @@
#define EVTYP_VT220MSG_MASK 0x00000040
#define EVTYP_CONFMGMDATA_MASK 0x10000000
#define EVTYP_SDIAS_MASK 0x00000010
#define EVTYP_ASYNC_MASK 0x00400000
#define GNRLMSGFLGS_DOM 0x8000
#define GNRLMSGFLGS_SNDALRM 0x4000
@ -85,12 +87,12 @@ struct sccb_header {
} __attribute__((packed));
extern u64 sclp_facilities;
#define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL)
#define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL)
#define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL)
#define SCLP_HAS_CPU_RECONFIG (sclp_facilities & 0x0400000000000000ULL)
struct gds_subvector {
u8 length;
u8 key;


@ -0,0 +1,224 @@
/*
* Enable Asynchronous Notification via SCLP.
*
* Copyright IBM Corp. 2009
* Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/sysctl.h>
#include <linux/utsname.h>
#include "sclp.h"
static int callhome_enabled;
static struct sclp_req *request;
static struct sclp_async_sccb *sccb;
static int sclp_async_send_wait(char *message);
static struct ctl_table_header *callhome_sysctl_header;
static DEFINE_SPINLOCK(sclp_async_lock);
static char nodename[64];
#define SCLP_NORMAL_WRITE 0x00
struct async_evbuf {
struct evbuf_header header;
u64 reserved;
u8 rflags;
u8 empty;
u8 rtype;
u8 otype;
char comp_id[12];
char data[3000]; /* there is still some space left */
} __attribute__((packed));
struct sclp_async_sccb {
struct sccb_header header;
struct async_evbuf evbuf;
} __attribute__((packed));
static struct sclp_register sclp_async_register = {
.send_mask = EVTYP_ASYNC_MASK,
};
static int call_home_on_panic(struct notifier_block *self,
unsigned long event, void *data)
{
strncat(data, nodename, strlen(nodename));
sclp_async_send_wait(data);
return NOTIFY_DONE;
}
static struct notifier_block call_home_panic_nb = {
.notifier_call = call_home_on_panic,
.priority = INT_MAX,
};
static int proc_handler_callhome(ctl_table *ctl, int write, struct file *filp,
void __user *buffer, size_t *count,
loff_t *ppos)
{
unsigned long val;
int len, rc;
char buf[2];
if (!*count | (*ppos && !write)) {
*count = 0;
return 0;
}
if (!write) {
len = sprintf(buf, "%d\n", callhome_enabled);
buf[len] = '\0';
rc = copy_to_user(buffer, buf, sizeof(buf));
if (rc != 0)
return -EFAULT;
} else {
len = *count;
rc = copy_from_user(buf, buffer, sizeof(buf));
if (rc != 0)
return -EFAULT;
if (strict_strtoul(buf, 0, &val) != 0)
return -EINVAL;
if (val != 0 && val != 1)
return -EINVAL;
callhome_enabled = val;
}
*count = len;
*ppos += len;
return 0;
}
static struct ctl_table callhome_table[] = {
{
.procname = "callhome",
.mode = 0644,
.proc_handler = &proc_handler_callhome,
},
{ .ctl_name = 0 }
};
static struct ctl_table kern_dir_table[] = {
{
.ctl_name = CTL_KERN,
.procname = "kernel",
.maxlen = 0,
.mode = 0555,
.child = callhome_table,
},
{ .ctl_name = 0 }
};
/*
* Function used to transfer an asynchronous notification
* record and wait for send completion
*/
static int sclp_async_send_wait(char *message)
{
struct async_evbuf *evb;
int rc;
unsigned long flags;
if (!callhome_enabled)
return 0;
sccb->evbuf.header.type = EVTYP_ASYNC;
sccb->evbuf.rtype = 0xA5;
sccb->evbuf.otype = 0x00;
evb = &sccb->evbuf;
request->command = SCLP_CMDW_WRITE_EVENT_DATA;
request->sccb = sccb;
request->status = SCLP_REQ_FILLED;
strncpy(sccb->evbuf.data, message, sizeof(sccb->evbuf.data));
/*
* Retain Queue
* e.g. 5639CC140 500 Red Hat RHEL5 Linux for zSeries (RHEL AS)
*/
strncpy(sccb->evbuf.comp_id, "000000000", sizeof(sccb->evbuf.comp_id));
sccb->evbuf.header.length = sizeof(sccb->evbuf);
sccb->header.length = sizeof(sccb->evbuf) + sizeof(sccb->header);
sccb->header.function_code = SCLP_NORMAL_WRITE;
rc = sclp_add_request(request);
if (rc)
return rc;
spin_lock_irqsave(&sclp_async_lock, flags);
while (request->status != SCLP_REQ_DONE &&
request->status != SCLP_REQ_FAILED) {
sclp_sync_wait();
}
spin_unlock_irqrestore(&sclp_async_lock, flags);
if (request->status != SCLP_REQ_DONE)
return -EIO;
rc = ((struct sclp_async_sccb *)
request->sccb)->header.response_code;
if (rc != 0x0020)
return -EIO;
if (evb->header.flags != 0x80)
return -EIO;
return rc;
}
static int __init sclp_async_init(void)
{
int rc;
rc = sclp_register(&sclp_async_register);
if (rc)
return rc;
callhome_sysctl_header = register_sysctl_table(kern_dir_table);
if (!callhome_sysctl_header) {
rc = -ENOMEM;
goto out_sclp;
}
if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK)) {
rc = -EOPNOTSUPP;
goto out_sclp;
}
rc = -ENOMEM;
request = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
if (!request)
goto out_sys;
sccb = (struct sclp_async_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
goto out_mem;
rc = atomic_notifier_chain_register(&panic_notifier_list,
&call_home_panic_nb);
if (rc)
goto out_mem;
strncpy(nodename, init_utsname()->nodename, 64);
return 0;
out_mem:
kfree(request);
free_page((unsigned long) sccb);
out_sys:
unregister_sysctl_table(callhome_sysctl_header);
out_sclp:
sclp_unregister(&sclp_async_register);
return rc;
}
module_init(sclp_async_init);
static void __exit sclp_async_exit(void)
{
atomic_notifier_chain_unregister(&panic_notifier_list,
&call_home_panic_nb);
unregister_sysctl_table(callhome_sysctl_header);
sclp_unregister(&sclp_async_register);
free_page((unsigned long) sccb);
kfree(request);
}
module_exit(sclp_async_exit);
MODULE_AUTHOR("Copyright IBM Corp. 2009");
MODULE_AUTHOR("Hans-Joachim Picht <hans@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SCLP Asynchronous Notification Records");
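The sysctl wiring above (kern_dir_table -> callhome_table -> proc_handler_callhome) exposes the switch as /proc/sys/kernel/callhome and only accepts the values 0 and 1. A minimal userspace sketch for flipping it; the path and accepted values come from the handler above, everything else is illustrative:

/* Illustrative userspace helper, not part of this patch.
 * Writes "0" or "1" to the kernel.callhome sysctl implemented above. */
#include <stdio.h>

int main(int argc, char *argv[])
{
	const char *val = (argc > 1) ? argv[1] : "1";
	FILE *f = fopen("/proc/sys/kernel/callhome", "w");

	if (!f) {
		perror("/proc/sys/kernel/callhome");
		return 1;
	}
	/* proc_handler_callhome() rejects anything other than 0 or 1 */
	fprintf(f, "%s\n", val);
	fclose(f);
	return 0;
}

Reading the same file returns the current value of callhome_enabled.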

View File

@ -8,7 +8,7 @@
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#define KMSG_COMPONENT "tape"
#define KMSG_COMPONENT "tape_34xx"
#include <linux/module.h>
#include <linux/init.h>

View File

@ -8,7 +8,7 @@
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#define KMSG_COMPONENT "tape"
#define KMSG_COMPONENT "tape_3590"
#include <linux/module.h>
#include <linux/init.h>
@ -39,8 +39,6 @@ EXPORT_SYMBOL(TAPE_DBF_AREA);
* - Read Alternate: implemented
*******************************************************************/
#define KMSG_COMPONENT "tape"
static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = {
[0x00] = "",
[0x10] = "Lost Sense",

View File

@ -302,8 +302,6 @@ tapeblock_revalidate_disk(struct gendisk *disk)
if (!device->blk_data.medium_changed)
return 0;
dev_info(&device->cdev->dev, "Determining the size of the recorded "
"area...\n");
rc = tape_mtop(device, MTFSFM, 1);
if (rc)
return rc;
@ -312,6 +310,8 @@ tapeblock_revalidate_disk(struct gendisk *disk)
if (rc < 0)
return rc;
pr_info("%s: Determining the size of the recorded area...\n",
dev_name(&device->cdev->dev));
DBF_LH(3, "Image file ends at %d\n", rc);
nr_of_blks = rc;
@ -330,8 +330,8 @@ tapeblock_revalidate_disk(struct gendisk *disk)
device->bof = rc;
nr_of_blks -= rc;
dev_info(&device->cdev->dev, "The size of the recorded area is %i "
"blocks\n", nr_of_blks);
pr_info("%s: The size of the recorded area is %i blocks\n",
dev_name(&device->cdev->dev), nr_of_blks);
set_capacity(device->blk_data.disk,
nr_of_blks*(TAPEBLOCK_HSEC_SIZE/512));
@ -366,8 +366,8 @@ tapeblock_open(struct block_device *bdev, fmode_t mode)
if (device->required_tapemarks) {
DBF_EVENT(2, "TBLOCK: missing tapemarks\n");
dev_warn(&device->cdev->dev, "Opening the tape failed because"
" of missing end-of-file marks\n");
pr_warning("%s: Opening the tape failed because of missing "
"end-of-file marks\n", dev_name(&device->cdev->dev));
rc = -EPERM;
goto put_device;
}

View File

@ -214,13 +214,15 @@ tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
switch(newstate){
case MS_UNLOADED:
device->tape_generic_status |= GMT_DR_OPEN(~0);
dev_info(&device->cdev->dev, "The tape cartridge has been "
"successfully unloaded\n");
if (device->medium_state == MS_LOADED)
pr_info("%s: The tape cartridge has been successfully "
"unloaded\n", dev_name(&device->cdev->dev));
break;
case MS_LOADED:
device->tape_generic_status &= ~GMT_DR_OPEN(~0);
dev_info(&device->cdev->dev, "A tape cartridge has been "
"mounted\n");
if (device->medium_state == MS_UNLOADED)
pr_info("%s: A tape cartridge has been mounted\n",
dev_name(&device->cdev->dev));
break;
default:
// print nothing
@ -358,11 +360,11 @@ tape_generic_online(struct tape_device *device,
out_char:
tapechar_cleanup_device(device);
out_minor:
tape_remove_minor(device);
out_discipline:
device->discipline->cleanup_device(device);
device->discipline = NULL;
out_minor:
tape_remove_minor(device);
out:
module_put(discipline->owner);
return rc;
@ -654,8 +656,8 @@ tape_generic_remove(struct ccw_device *cdev)
*/
DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
device->cdev_id);
dev_warn(&device->cdev->dev, "A tape unit was detached"
" while in use\n");
pr_warning("%s: A tape unit was detached while in "
"use\n", dev_name(&device->cdev->dev));
tape_state_set(device, TS_NOT_OPER);
__tape_discard_requests(device);
spin_unlock_irq(get_ccwdev_lock(device->cdev));

View File

@ -68,7 +68,7 @@ tape_std_assign(struct tape_device *device)
* to another host (actually this shouldn't happen but it does).
* So we set up a timeout for this call.
*/
init_timer(&timeout);
init_timer_on_stack(&timeout);
timeout.function = tape_std_assign_timeout;
timeout.data = (unsigned long) request;
timeout.expires = jiffies + 2 * HZ;
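init_timer_on_stack() announces the on-stack timer to the debug-objects infrastructure. As a general rule such a timer should also be cancelled with del_timer_sync() and, when object debugging is enabled, released with destroy_timer_on_stack() before the function returns; the remainder of tape_std_assign() is not shown in this hunk. A generic sketch of the pattern, with hypothetical my_* names:

/* Generic sketch of a 2.6.31-era on-stack timer; my_* names are
 * illustrative and not taken from the tape driver. */
#include <linux/timer.h>
#include <linux/jiffies.h>

static void my_timeout_fn(unsigned long data)
{
	/* e.g. cancel the request identified by 'data' */
}

static void my_wait_with_timeout(unsigned long cookie)
{
	struct timer_list timeout;

	init_timer_on_stack(&timeout);	/* register the stack object */
	timeout.function = my_timeout_fn;
	timeout.data = cookie;
	timeout.expires = jiffies + 2 * HZ;
	add_timer(&timeout);

	/* ... wait for the request to complete ... */

	del_timer_sync(&timeout);	  /* make sure the handler finished */
	destroy_timer_on_stack(&timeout); /* release the debug object */
}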

View File

@ -765,8 +765,10 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
} else
return -ENOMEM;
ret = device_register(dev);
if (ret)
if (ret) {
put_device(dev);
return ret;
}
ret = sysfs_create_group(&dev->kobj, &vmlogrdr_attr_group);
if (ret) {

View File

@ -1026,9 +1026,15 @@ static int __init ur_init(void)
debug_set_level(vmur_dbf, 6);
vmur_class = class_create(THIS_MODULE, "vmur");
if (IS_ERR(vmur_class)) {
rc = PTR_ERR(vmur_class);
goto fail_free_dbf;
}
rc = ccw_driver_register(&ur_driver);
if (rc)
goto fail_free_dbf;
goto fail_class_destroy;
rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
if (rc) {
@ -1038,18 +1044,13 @@ static int __init ur_init(void)
}
ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);
vmur_class = class_create(THIS_MODULE, "vmur");
if (IS_ERR(vmur_class)) {
rc = PTR_ERR(vmur_class);
goto fail_unregister_region;
}
pr_info("%s loaded.\n", ur_banner);
return 0;
fail_unregister_region:
unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
fail_unregister_driver:
ccw_driver_unregister(&ur_driver);
fail_class_destroy:
class_destroy(vmur_class);
fail_free_dbf:
debug_unregister(vmur_dbf);
return rc;
@ -1057,9 +1058,9 @@ static int __init ur_init(void)
static void __exit ur_exit(void)
{
class_destroy(vmur_class);
unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
ccw_driver_unregister(&ur_driver);
class_destroy(vmur_class);
debug_unregister(vmur_dbf);
pr_info("%s unloaded.\n", ur_banner);
}
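The ur_init()/ur_exit() reordering above follows the usual rule that teardown mirrors setup: resources are released in the reverse order of their creation, and each error label undoes exactly what had succeeded before the failure. A compact sketch of that shape, with my_* placeholders standing in for class_create(), ccw_driver_register() and alloc_chrdev_region():

/* Illustrative only; the my_* helpers are placeholders. */
static int my_create_class(void)     { return 0; }
static int my_register_driver(void)  { return 0; }
static int my_alloc_region(void)     { return 0; }
static void my_unregister_driver(void) { }
static void my_destroy_class(void)     { }

static int my_init(void)
{
	int rc;

	rc = my_create_class();
	if (rc)
		return rc;
	rc = my_register_driver();
	if (rc)
		goto out_class;		/* undo only what succeeded... */
	rc = my_alloc_region();
	if (rc)
		goto out_driver;	/* ...in reverse order of setup */
	return 0;

out_driver:
	my_unregister_driver();
out_class:
	my_destroy_class();
	return rc;
}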

View File

@ -275,7 +275,7 @@ struct zcore_header {
u32 num_pages;
u32 pad1;
u64 tod;
cpuid_t cpu_id;
struct cpuid cpu_id;
u32 arch_id;
u32 volnr;
u32 build_arch;

View File

@ -2,7 +2,7 @@
# Makefile for the S/390 common i/o drivers
#
obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o scsw.o \
obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
fcx.o itcw.o crw.o
ccw_device-objs += device.o device_fsm.o device_ops.o
ccw_device-objs += device_id.o device_pgid.o device_status.o

View File

@ -417,7 +417,8 @@ int chp_new(struct chp_id chpid)
if (ret) {
CIO_MSG_EVENT(0, "Could not register chp%x.%02x: %d\n",
chpid.cssid, chpid.id, ret);
goto out_free;
put_device(&chp->dev);
goto out;
}
ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
if (ret) {

View File

@ -37,29 +37,6 @@ struct channel_path_desc {
struct channel_path;
struct css_general_char {
u64 : 12;
u32 dynio : 1; /* bit 12 */
u32 : 28;
u32 aif : 1; /* bit 41 */
u32 : 3;
u32 mcss : 1; /* bit 45 */
u32 fcs : 1; /* bit 46 */
u32 : 1;
u32 ext_mb : 1; /* bit 48 */
u32 : 7;
u32 aif_tdd : 1; /* bit 56 */
u32 : 1;
u32 qebsm : 1; /* bit 58 */
u32 : 8;
u32 aif_osa : 1; /* bit 67 */
u32 : 14;
u32 cib : 1; /* bit 82 */
u32 : 5;
u32 fcx : 1; /* bit 88 */
u32 : 7;
}__attribute__((packed));
struct css_chsc_char {
u64 res;
u64 : 20;
@ -72,7 +49,6 @@ struct css_chsc_char {
u32 : 19;
}__attribute__((packed));
extern struct css_general_char css_general_characteristics;
extern struct css_chsc_char css_chsc_characteristics;
struct chsc_ssd_info {

View File

@ -139,12 +139,11 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
__u8 lpm, /* logical path mask */
__u8 key) /* storage key */
{
char dbf_txt[15];
int ccode;
union orb *orb;
CIO_TRACE_EVENT(4, "stIO");
CIO_TRACE_EVENT(4, dev_name(&sch->dev));
CIO_TRACE_EVENT(5, "stIO");
CIO_TRACE_EVENT(5, dev_name(&sch->dev));
orb = &to_io_private(sch)->orb;
memset(orb, 0, sizeof(union orb));
@ -169,8 +168,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
ccode = ssch(sch->schid, orb);
/* process condition code */
sprintf(dbf_txt, "ccode:%d", ccode);
CIO_TRACE_EVENT(4, dbf_txt);
CIO_HEX_EVENT(5, &ccode, sizeof(ccode));
switch (ccode) {
case 0:
@ -201,16 +199,14 @@ cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm)
int
cio_resume (struct subchannel *sch)
{
char dbf_txt[15];
int ccode;
CIO_TRACE_EVENT (4, "resIO");
CIO_TRACE_EVENT(4, "resIO");
CIO_TRACE_EVENT(4, dev_name(&sch->dev));
ccode = rsch (sch->schid);
sprintf (dbf_txt, "ccode:%d", ccode);
CIO_TRACE_EVENT (4, dbf_txt);
CIO_HEX_EVENT(4, &ccode, sizeof(ccode));
switch (ccode) {
case 0:
@ -235,13 +231,12 @@ cio_resume (struct subchannel *sch)
int
cio_halt(struct subchannel *sch)
{
char dbf_txt[15];
int ccode;
if (!sch)
return -ENODEV;
CIO_TRACE_EVENT (2, "haltIO");
CIO_TRACE_EVENT(2, "haltIO");
CIO_TRACE_EVENT(2, dev_name(&sch->dev));
/*
@ -249,8 +244,7 @@ cio_halt(struct subchannel *sch)
*/
ccode = hsch (sch->schid);
sprintf (dbf_txt, "ccode:%d", ccode);
CIO_TRACE_EVENT (2, dbf_txt);
CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
switch (ccode) {
case 0:
@ -270,13 +264,12 @@ cio_halt(struct subchannel *sch)
int
cio_clear(struct subchannel *sch)
{
char dbf_txt[15];
int ccode;
if (!sch)
return -ENODEV;
CIO_TRACE_EVENT (2, "clearIO");
CIO_TRACE_EVENT(2, "clearIO");
CIO_TRACE_EVENT(2, dev_name(&sch->dev));
/*
@ -284,8 +277,7 @@ cio_clear(struct subchannel *sch)
*/
ccode = csch (sch->schid);
sprintf (dbf_txt, "ccode:%d", ccode);
CIO_TRACE_EVENT (2, dbf_txt);
CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
switch (ccode) {
case 0:
@ -306,19 +298,17 @@ cio_clear(struct subchannel *sch)
int
cio_cancel (struct subchannel *sch)
{
char dbf_txt[15];
int ccode;
if (!sch)
return -ENODEV;
CIO_TRACE_EVENT (2, "cancelIO");
CIO_TRACE_EVENT(2, "cancelIO");
CIO_TRACE_EVENT(2, dev_name(&sch->dev));
ccode = xsch (sch->schid);
sprintf (dbf_txt, "ccode:%d", ccode);
CIO_TRACE_EVENT (2, dbf_txt);
CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
switch (ccode) {
case 0: /* success */
@ -429,11 +419,10 @@ EXPORT_SYMBOL_GPL(cio_update_schib);
*/
int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
{
char dbf_txt[15];
int retry;
int ret;
CIO_TRACE_EVENT (2, "ensch");
CIO_TRACE_EVENT(2, "ensch");
CIO_TRACE_EVENT(2, dev_name(&sch->dev));
if (sch_is_pseudo_sch(sch))
@ -460,8 +449,7 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
} else
break;
}
sprintf (dbf_txt, "ret:%d", ret);
CIO_TRACE_EVENT (2, dbf_txt);
CIO_HEX_EVENT(2, &ret, sizeof(ret));
return ret;
}
EXPORT_SYMBOL_GPL(cio_enable_subchannel);
@ -472,11 +460,10 @@ EXPORT_SYMBOL_GPL(cio_enable_subchannel);
*/
int cio_disable_subchannel(struct subchannel *sch)
{
char dbf_txt[15];
int retry;
int ret;
CIO_TRACE_EVENT (2, "dissch");
CIO_TRACE_EVENT(2, "dissch");
CIO_TRACE_EVENT(2, dev_name(&sch->dev));
if (sch_is_pseudo_sch(sch))
@ -495,8 +482,7 @@ int cio_disable_subchannel(struct subchannel *sch)
} else
break;
}
sprintf (dbf_txt, "ret:%d", ret);
CIO_TRACE_EVENT (2, dbf_txt);
CIO_HEX_EVENT(2, &ret, sizeof(ret));
return ret;
}
EXPORT_SYMBOL_GPL(cio_disable_subchannel);
@ -578,11 +564,6 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
goto out;
}
mutex_init(&sch->reg_mutex);
/* Set a name for the subchannel */
if (cio_is_console(schid))
sch->dev.init_name = cio_get_console_sch_name(schid);
else
dev_set_name(&sch->dev, "0.%x.%04x", schid.ssid, schid.sch_no);
/*
* The first subchannel that is not-operational (ccode==3)
@ -686,7 +667,6 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
#ifdef CONFIG_CCW_CONSOLE
static struct subchannel console_subchannel;
static char console_sch_name[10] = "0.x.xxxx";
static struct io_subchannel_private console_priv;
static int console_subchannel_in_use;
@ -873,12 +853,6 @@ cio_get_console_subchannel(void)
return &console_subchannel;
}
const char *cio_get_console_sch_name(struct subchannel_id schid)
{
snprintf(console_sch_name, 10, "0.%x.%04x", schid.ssid, schid.sch_no);
return (const char *)console_sch_name;
}
#endif
static int
__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)

View File

@ -133,15 +133,11 @@ extern int cio_is_console(struct subchannel_id);
extern struct subchannel *cio_get_console_subchannel(void);
extern spinlock_t * cio_get_console_lock(void);
extern void *cio_get_console_priv(void);
extern const char *cio_get_console_sch_name(struct subchannel_id schid);
extern const char *cio_get_console_cdev_name(struct subchannel *sch);
#else
#define cio_is_console(schid) 0
#define cio_get_console_subchannel() NULL
#define cio_get_console_lock() NULL
#define cio_get_console_priv() NULL
#define cio_get_console_sch_name(schid) NULL
#define cio_get_console_cdev_name(sch) NULL
#endif
#endif

View File

@ -151,18 +151,6 @@ css_alloc_subchannel(struct subchannel_id schid)
return sch;
}
static void
css_free_subchannel(struct subchannel *sch)
{
if (sch) {
/* Reset intparm to zeroes. */
sch->config.intparm = 0;
cio_commit_config(sch);
kfree(sch->lock);
kfree(sch);
}
}
static void
css_subchannel_release(struct device *dev)
{
@ -170,6 +158,9 @@ css_subchannel_release(struct device *dev)
sch = to_subchannel(dev);
if (!cio_is_console(sch->schid)) {
/* Reset intparm to zeroes. */
sch->config.intparm = 0;
cio_commit_config(sch);
kfree(sch->lock);
kfree(sch);
}
@ -180,6 +171,8 @@ static int css_sch_device_register(struct subchannel *sch)
int ret;
mutex_lock(&sch->reg_mutex);
dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
sch->schid.sch_no);
ret = device_register(&sch->dev);
mutex_unlock(&sch->reg_mutex);
return ret;
@ -327,7 +320,7 @@ int css_probe_device(struct subchannel_id schid)
return PTR_ERR(sch);
ret = css_register_subchannel(sch);
if (ret)
css_free_subchannel(sch);
put_device(&sch->dev);
return ret;
}
@ -644,7 +637,10 @@ __init_channel_subsystem(struct subchannel_id schid, void *data)
* not working) so we do it now. This is true e.g. for the
* console subchannel.
*/
css_register_subchannel(sch);
if (css_register_subchannel(sch)) {
if (!cio_is_console(schid))
put_device(&sch->dev);
}
return 0;
}
@ -661,8 +657,8 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
css->global_pgid.pgid_high.cpu_addr = 0;
#endif
}
css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
css->global_pgid.cpu_id = S390_lowcore.cpu_id.ident;
css->global_pgid.cpu_model = S390_lowcore.cpu_id.machine;
css->global_pgid.tod_high = tod_high;
}
@ -920,8 +916,10 @@ init_channel_subsystem (void)
goto out_device;
}
ret = device_register(&css->pseudo_subchannel->dev);
if (ret)
if (ret) {
put_device(&css->pseudo_subchannel->dev);
goto out_file;
}
}
ret = register_reboot_notifier(&css_reboot_notifier);
if (ret)

View File

@ -307,8 +307,11 @@ int ccw_device_is_orphan(struct ccw_device *cdev)
static void ccw_device_unregister(struct ccw_device *cdev)
{
if (test_and_clear_bit(1, &cdev->private->registered))
if (test_and_clear_bit(1, &cdev->private->registered)) {
device_del(&cdev->dev);
/* Release reference from device_initialize(). */
put_device(&cdev->dev);
}
}
static void ccw_device_remove_orphan_cb(struct work_struct *work)
@ -319,7 +322,6 @@ static void ccw_device_remove_orphan_cb(struct work_struct *work)
priv = container_of(work, struct ccw_device_private, kick_work);
cdev = priv->cdev;
ccw_device_unregister(cdev);
put_device(&cdev->dev);
/* Release cdev reference for workqueue processing. */
put_device(&cdev->dev);
}
@ -333,15 +335,15 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
* Forced offline in disconnected state means
* 'throw away device'.
*/
/* Get cdev reference for workqueue processing. */
if (!get_device(&cdev->dev))
return;
if (ccw_device_is_orphan(cdev)) {
/*
* Deregister ccw device.
* Unfortunately, we cannot do this directly from the
* attribute method.
*/
/* Get cdev reference for workqueue processing. */
if (!get_device(&cdev->dev))
return;
spin_lock_irqsave(cdev->ccwlock, flags);
cdev->private->state = DEV_STATE_NOT_OPER;
spin_unlock_irqrestore(cdev->ccwlock, flags);
@ -380,30 +382,34 @@ int ccw_device_set_offline(struct ccw_device *cdev)
}
cdev->online = 0;
spin_lock_irq(cdev->ccwlock);
ret = ccw_device_offline(cdev);
if (ret == -ENODEV) {
if (cdev->private->state != DEV_STATE_NOT_OPER) {
cdev->private->state = DEV_STATE_OFFLINE;
dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
}
/* Wait until a final state or DISCONNECTED is reached */
while (!dev_fsm_final_state(cdev) &&
cdev->private->state != DEV_STATE_DISCONNECTED) {
spin_unlock_irq(cdev->ccwlock);
/* Give up reference from ccw_device_set_online(). */
put_device(&cdev->dev);
return ret;
wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
cdev->private->state == DEV_STATE_DISCONNECTED));
spin_lock_irq(cdev->ccwlock);
}
ret = ccw_device_offline(cdev);
if (ret)
goto error;
spin_unlock_irq(cdev->ccwlock);
if (ret == 0) {
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
/* Give up reference from ccw_device_set_online(). */
put_device(&cdev->dev);
} else {
CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
"device 0.%x.%04x\n",
ret, cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
cdev->online = 1;
}
return ret;
wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
cdev->private->state == DEV_STATE_DISCONNECTED));
/* Give up reference from ccw_device_set_online(). */
put_device(&cdev->dev);
return 0;
error:
CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device 0.%x.%04x\n",
ret, cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
cdev->private->state = DEV_STATE_OFFLINE;
dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
spin_unlock_irq(cdev->ccwlock);
/* Give up reference from ccw_device_set_online(). */
put_device(&cdev->dev);
return -ENODEV;
}
/**
@ -421,6 +427,7 @@ int ccw_device_set_offline(struct ccw_device *cdev)
int ccw_device_set_online(struct ccw_device *cdev)
{
int ret;
int ret2;
if (!cdev)
return -ENODEV;
@ -444,28 +451,53 @@ int ccw_device_set_online(struct ccw_device *cdev)
put_device(&cdev->dev);
return ret;
}
if (cdev->private->state != DEV_STATE_ONLINE) {
spin_lock_irq(cdev->ccwlock);
/* Check if online processing was successful */
if ((cdev->private->state != DEV_STATE_ONLINE) &&
(cdev->private->state != DEV_STATE_W4SENSE)) {
spin_unlock_irq(cdev->ccwlock);
/* Give up online reference since onlining failed. */
put_device(&cdev->dev);
return -ENODEV;
}
if (!cdev->drv->set_online || cdev->drv->set_online(cdev) == 0) {
cdev->online = 1;
return 0;
}
spin_lock_irq(cdev->ccwlock);
ret = ccw_device_offline(cdev);
spin_unlock_irq(cdev->ccwlock);
if (ret == 0)
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
else
CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
"device 0.%x.%04x\n",
ret, cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
if (cdev->drv->set_online)
ret = cdev->drv->set_online(cdev);
if (ret)
goto rollback;
cdev->online = 1;
return 0;
rollback:
spin_lock_irq(cdev->ccwlock);
/* Wait until a final state or DISCONNECTED is reached */
while (!dev_fsm_final_state(cdev) &&
cdev->private->state != DEV_STATE_DISCONNECTED) {
spin_unlock_irq(cdev->ccwlock);
wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
cdev->private->state == DEV_STATE_DISCONNECTED));
spin_lock_irq(cdev->ccwlock);
}
ret2 = ccw_device_offline(cdev);
if (ret2)
goto error;
spin_unlock_irq(cdev->ccwlock);
wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
cdev->private->state == DEV_STATE_DISCONNECTED));
/* Give up online reference since onlining failed. */
put_device(&cdev->dev);
return (ret == 0) ? -ENODEV : ret;
return ret;
error:
CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
"device 0.%x.%04x\n",
ret2, cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
cdev->private->state = DEV_STATE_OFFLINE;
spin_unlock_irq(cdev->ccwlock);
/* Give up online reference since onlining failed. */
put_device(&cdev->dev);
return ret;
}
static int online_store_handle_offline(struct ccw_device *cdev)
@ -637,8 +669,12 @@ static int ccw_device_register(struct ccw_device *cdev)
int ret;
dev->bus = &ccw_bus_type;
if ((ret = device_add(dev)))
ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
if (ret)
return ret;
ret = device_add(dev);
if (ret)
return ret;
set_bit(1, &cdev->private->registered);
@ -1024,9 +1060,6 @@ static void ccw_device_call_sch_unregister(struct work_struct *work)
return;
sch = to_subchannel(cdev->dev.parent);
css_sch_device_unregister(sch);
/* Reset intparm to zeroes. */
sch->config.intparm = 0;
cio_commit_config(sch);
/* Release cdev reference for workqueue processing.*/
put_device(&cdev->dev);
/* Release subchannel reference for local processing. */
@ -1035,6 +1068,9 @@ static void ccw_device_call_sch_unregister(struct work_struct *work)
void ccw_device_schedule_sch_unregister(struct ccw_device *cdev)
{
/* Get cdev reference for workqueue processing. */
if (!get_device(&cdev->dev))
return;
PREPARE_WORK(&cdev->private->kick_work,
ccw_device_call_sch_unregister);
queue_work(slow_path_wq, &cdev->private->kick_work);
@ -1055,9 +1091,6 @@ io_subchannel_recog_done(struct ccw_device *cdev)
/* Device did not respond in time. */
case DEV_STATE_NOT_OPER:
cdev->private->flags.recog_done = 1;
/* Remove device found not operational. */
if (!get_device(&cdev->dev))
break;
ccw_device_schedule_sch_unregister(cdev);
if (atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq);
@ -1095,13 +1128,6 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
init_waitqueue_head(&priv->wait_q);
init_timer(&priv->timer);
/* Set an initial name for the device. */
if (cio_is_console(sch->schid))
cdev->dev.init_name = cio_get_console_cdev_name(sch);
else
dev_set_name(&cdev->dev, "0.%x.%04x",
sch->schid.ssid, sch->schib.pmcw.dev);
/* Increase counter of devices currently in recognition. */
atomic_inc(&ccw_device_init_count);
@ -1171,8 +1197,8 @@ static void io_subchannel_irq(struct subchannel *sch)
cdev = sch_get_cdev(sch);
CIO_TRACE_EVENT(3, "IRQ");
CIO_TRACE_EVENT(3, dev_name(&sch->dev));
CIO_TRACE_EVENT(6, "IRQ");
CIO_TRACE_EVENT(6, dev_name(&sch->dev));
if (cdev)
dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
}
@ -1210,9 +1236,6 @@ static void io_subchannel_do_unreg(struct work_struct *work)
sch = container_of(work, struct subchannel, work);
css_sch_device_unregister(sch);
/* Reset intparm to zeroes. */
sch->config.intparm = 0;
cio_commit_config(sch);
put_device(&sch->dev);
}
@ -1334,7 +1357,6 @@ io_subchannel_remove (struct subchannel *sch)
cdev->private->state = DEV_STATE_NOT_OPER;
spin_unlock_irqrestore(cdev->ccwlock, flags);
ccw_device_unregister(cdev);
put_device(&cdev->dev);
kfree(sch->private);
sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
return 0;
@ -1571,8 +1593,6 @@ static int purge_fn(struct device *dev, void *data)
spin_unlock_irq(cdev->ccwlock);
if (!unreg)
goto out;
if (!get_device(&cdev->dev))
goto out;
CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", priv->dev_id.ssid,
priv->dev_id.devno);
ccw_device_schedule_sch_unregister(cdev);
@ -1688,10 +1708,6 @@ static int io_subchannel_sch_event(struct subchannel *sch, int slow)
spin_unlock_irqrestore(sch->lock, flags);
css_sch_device_unregister(sch);
spin_lock_irqsave(sch->lock, flags);
/* Reset intparm to zeroes. */
sch->config.intparm = 0;
cio_commit_config(sch);
break;
case REPROBE:
ccw_device_trigger_reprobe(cdev);
@ -1712,7 +1728,6 @@ static int io_subchannel_sch_event(struct subchannel *sch, int slow)
#ifdef CONFIG_CCW_CONSOLE
static struct ccw_device console_cdev;
static char console_cdev_name[10] = "0.x.xxxx";
static struct ccw_device_private console_private;
static int console_cdev_in_use;
@ -1796,13 +1811,6 @@ int ccw_device_force_console(void)
return ccw_device_pm_restore(&console_cdev.dev);
}
EXPORT_SYMBOL_GPL(ccw_device_force_console);
const char *cio_get_console_cdev_name(struct subchannel *sch)
{
snprintf(console_cdev_name, 10, "0.%x.%04x",
sch->schid.ssid, sch->schib.pmcw.dev);
return (const char *)console_cdev_name;
}
#endif
/*
@ -2020,7 +2028,9 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
spin_unlock_irq(sch->lock);
if (ret) {
CIO_MSG_EVENT(0, "Couldn't start recognition for device "
"%s (ret=%d)\n", dev_name(&cdev->dev), ret);
"0.%x.%04x (ret=%d)\n",
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno, ret);
spin_lock_irq(sch->lock);
cdev->private->state = DEV_STATE_DISCONNECTED;
spin_unlock_irq(sch->lock);
@ -2083,8 +2093,9 @@ static int ccw_device_pm_restore(struct device *dev)
}
/* check if the device id has changed */
if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
CIO_MSG_EVENT(0, "resume: sch %s: failed (devno changed from "
"%04x to %04x)\n", dev_name(&sch->dev),
CIO_MSG_EVENT(0, "resume: sch 0.%x.%04x: failed (devno "
"changed from %04x to %04x)\n",
sch->schid.ssid, sch->schid.sch_no,
cdev->private->dev_id.devno,
sch->schib.pmcw.dev);
goto out_unreg_unlock;
@ -2117,8 +2128,9 @@ static int ccw_device_pm_restore(struct device *dev)
if (cm_enabled) {
ret = ccw_set_cmf(cdev, 1);
if (ret) {
CIO_MSG_EVENT(2, "resume: cdev %s: cmf failed "
"(rc=%d)\n", dev_name(&cdev->dev), ret);
CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
"(rc=%d)\n", cdev->private->dev_id.ssid,
cdev->private->dev_id.devno, ret);
ret = 0;
}
}

View File

@ -394,6 +394,13 @@ ccw_device_done(struct ccw_device *cdev, int state)
ccw_device_schedule_sch_unregister(cdev);
cdev->private->flags.donotify = 0;
}
if (state == DEV_STATE_NOT_OPER) {
CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
cdev->private->dev_id.devno, sch->schid.sch_no);
if (!ccw_device_notify(cdev, CIO_GONE))
ccw_device_schedule_sch_unregister(cdev);
cdev->private->flags.donotify = 0;
}
if (cdev->private->flags.donotify) {
cdev->private->flags.donotify = 0;
@ -730,6 +737,17 @@ static void ccw_device_generic_notoper(struct ccw_device *cdev,
css_schedule_eval(sch->schid);
}
/*
* Handle path verification event in offline state.
*/
static void ccw_device_offline_verify(struct ccw_device *cdev,
enum dev_event dev_event)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
css_schedule_eval(sch->schid);
}
/*
* Handle path verification event.
*/
@ -887,6 +905,8 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
}
call_handler:
cdev->private->state = DEV_STATE_ONLINE;
/* In case sensing interfered with setting the device online */
wake_up(&cdev->private->wait_q);
/* Call the handler. */
if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
/* Start delayed path verification. */
@ -1149,7 +1169,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_offline_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_offline_verify,
},
[DEV_STATE_VERIFY] = {
[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,

View File

@ -1,7 +1,7 @@
/*
* linux/drivers/s390/cio/qdio.h
*
* Copyright 2000,2008 IBM Corp.
* Copyright 2000,2009 IBM Corp.
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
*/
@ -246,6 +246,7 @@ struct qdio_q {
atomic_t nr_buf_used;
struct qdio_irq *irq_ptr;
struct dentry *debugfs_q;
struct tasklet_struct tasklet;
/* error condition during a data transfer */
@ -267,6 +268,7 @@ struct qdio_irq {
struct qib qib;
u32 *dsci; /* address of device state change indicator */
struct ccw_device *cdev;
struct dentry *debugfs_dev;
unsigned long int_parm;
struct subchannel_id schid;

View File

@ -1,14 +1,12 @@
/*
* drivers/s390/cio/qdio_debug.c
*
* Copyright IBM Corp. 2008
* Copyright IBM Corp. 2008,2009
*
* Author: Jan Glauber (jang@linux.vnet.ibm.com)
*/
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <asm/qdio.h>
#include <asm/debug.h>
#include "qdio_debug.h"
#include "qdio.h"
@ -17,10 +15,7 @@ debug_info_t *qdio_dbf_setup;
debug_info_t *qdio_dbf_error;
static struct dentry *debugfs_root;
#define MAX_DEBUGFS_QUEUES 32
static struct dentry *debugfs_queues[MAX_DEBUGFS_QUEUES] = { NULL };
static DEFINE_MUTEX(debugfs_mutex);
#define QDIO_DEBUGFS_NAME_LEN 40
#define QDIO_DEBUGFS_NAME_LEN 10
void qdio_allocate_dbf(struct qdio_initialize *init_data,
struct qdio_irq *irq_ptr)
@ -130,20 +125,6 @@ static int qstat_seq_open(struct inode *inode, struct file *filp)
filp->f_path.dentry->d_inode->i_private);
}
static void remove_debugfs_entry(struct qdio_q *q)
{
int i;
for (i = 0; i < MAX_DEBUGFS_QUEUES; i++) {
if (!debugfs_queues[i])
continue;
if (debugfs_queues[i]->d_inode->i_private == q) {
debugfs_remove(debugfs_queues[i]);
debugfs_queues[i] = NULL;
}
}
}
static struct file_operations debugfs_fops = {
.owner = THIS_MODULE,
.open = qstat_seq_open,
@ -155,22 +136,15 @@ static struct file_operations debugfs_fops = {
static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev)
{
int i = 0;
char name[QDIO_DEBUGFS_NAME_LEN];
while (debugfs_queues[i] != NULL) {
i++;
if (i >= MAX_DEBUGFS_QUEUES)
return;
}
snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%s_%d",
dev_name(&cdev->dev),
snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%d",
q->is_input_q ? "input" : "output",
q->nr);
debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
debugfs_root, q, &debugfs_fops);
if (IS_ERR(debugfs_queues[i]))
debugfs_queues[i] = NULL;
q->debugfs_q = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
q->irq_ptr->debugfs_dev, q, &debugfs_fops);
if (IS_ERR(q->debugfs_q))
q->debugfs_q = NULL;
}
void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
@ -178,12 +152,14 @@ void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
struct qdio_q *q;
int i;
mutex_lock(&debugfs_mutex);
irq_ptr->debugfs_dev = debugfs_create_dir(dev_name(&cdev->dev),
debugfs_root);
if (IS_ERR(irq_ptr->debugfs_dev))
irq_ptr->debugfs_dev = NULL;
for_each_input_queue(irq_ptr, q, i)
setup_debugfs_entry(q, cdev);
for_each_output_queue(irq_ptr, q, i)
setup_debugfs_entry(q, cdev);
mutex_unlock(&debugfs_mutex);
}
void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
@ -191,17 +167,16 @@ void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cd
struct qdio_q *q;
int i;
mutex_lock(&debugfs_mutex);
for_each_input_queue(irq_ptr, q, i)
remove_debugfs_entry(q);
debugfs_remove(q->debugfs_q);
for_each_output_queue(irq_ptr, q, i)
remove_debugfs_entry(q);
mutex_unlock(&debugfs_mutex);
debugfs_remove(q->debugfs_q);
debugfs_remove(irq_ptr->debugfs_dev);
}
int __init qdio_debug_init(void)
{
debugfs_root = debugfs_create_dir("qdio_queues", NULL);
debugfs_root = debugfs_create_dir("qdio", NULL);
qdio_dbf_setup = debug_register("qdio_setup", 16, 1, 16);
debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view);

View File

@ -798,8 +798,10 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
if (!qdio_inbound_q_done(q)) {
qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
tasklet_schedule(&q->tasklet);
return;
}
}
qdio_stop_polling(q);

View File

@ -648,7 +648,9 @@ static int ap_bus_suspend(struct device *dev, pm_message_t state)
/* Poll on the device until all requests are finished. */
do {
flags = 0;
spin_lock_bh(&ap_dev->lock);
__ap_poll_device(ap_dev, &flags);
spin_unlock_bh(&ap_dev->lock);
} while ((flags & 1) || (flags & 2));
ap_device_remove(dev);
@ -1109,12 +1111,15 @@ static void ap_scan_bus(struct work_struct *unused)
ap_dev->device.bus = &ap_bus_type;
ap_dev->device.parent = ap_root_device;
dev_set_name(&ap_dev->device, "card%02x",
AP_QID_DEVICE(ap_dev->qid));
if (dev_set_name(&ap_dev->device, "card%02x",
AP_QID_DEVICE(ap_dev->qid))) {
kfree(ap_dev);
continue;
}
ap_dev->device.release = ap_device_release;
rc = device_register(&ap_dev->device);
if (rc) {
kfree(ap_dev);
put_device(&ap_dev->device);
continue;
}
/* Add device attributes. */
@ -1407,14 +1412,12 @@ static void ap_reset(struct ap_device *ap_dev)
static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
{
spin_lock(&ap_dev->lock);
if (!ap_dev->unregistered) {
if (ap_poll_queue(ap_dev, flags))
ap_dev->unregistered = 1;
if (ap_dev->reset == AP_RESET_DO)
ap_reset(ap_dev);
}
spin_unlock(&ap_dev->lock);
return 0;
}
@ -1441,7 +1444,9 @@ static void ap_poll_all(unsigned long dummy)
flags = 0;
spin_lock(&ap_device_list_lock);
list_for_each_entry(ap_dev, &ap_device_list, list) {
spin_lock(&ap_dev->lock);
__ap_poll_device(ap_dev, &flags);
spin_unlock(&ap_dev->lock);
}
spin_unlock(&ap_device_list_lock);
} while (flags & 1);
@ -1487,7 +1492,9 @@ static int ap_poll_thread(void *data)
flags = 0;
spin_lock_bh(&ap_device_list_lock);
list_for_each_entry(ap_dev, &ap_device_list, list) {
spin_lock(&ap_dev->lock);
__ap_poll_device(ap_dev, &flags);
spin_unlock(&ap_dev->lock);
}
spin_unlock_bh(&ap_device_list_lock);
}

View File

@ -403,10 +403,14 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
return len;
}
void __init s390_virtio_console_init(void)
static int __init s390_virtio_console_init(void)
{
virtio_cons_early_init(early_put_chars);
if (!MACHINE_IS_KVM)
return -ENODEV;
return virtio_cons_early_init(early_put_chars);
}
console_initcall(s390_virtio_console_init);
/*
* We do this after core stuff, but before the drivers.

View File

@ -1839,9 +1839,10 @@ static int netiucv_register_device(struct net_device *ndev)
return -ENOMEM;
ret = device_register(dev);
if (ret)
if (ret) {
put_device(dev);
return ret;
}
ret = netiucv_add_files(dev);
if (ret)
goto out_unreg;
@ -2226,8 +2227,10 @@ static int __init netiucv_init(void)
netiucv_dev->release = (void (*)(struct device *))kfree;
netiucv_dev->driver = &netiucv_driver;
rc = device_register(netiucv_dev);
if (rc)
if (rc) {
put_device(netiucv_dev);
goto out_driver;
}
netiucv_banner();
return rc;

View File

@ -219,13 +219,13 @@ static int __init smsg_init(void)
smsg_dev->driver = &smsg_driver;
rc = device_register(smsg_dev);
if (rc)
goto out_free_dev;
goto out_put;
cpcmd("SET SMSG IUCV", NULL, 0, NULL);
return 0;
out_free_dev:
kfree(smsg_dev);
out_put:
put_device(smsg_dev);
out_free_path:
iucv_path_free(smsg_path);
smsg_path = NULL;
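
Several hunks above (vmlogrdr, chp_new, the css pseudo subchannel, ap_scan_bus, netiucv and smsg) apply the same driver-core rule: once device_register() or device_add() has run, the embedded kobject owns the structure, so a failed registration must be dropped with put_device() and freed from the release callback, never with a direct kfree(). A minimal sketch of that pattern; the my_* names are illustrative, not taken from any of the drivers above:

/* Illustrative sketch of the put_device()-on-failure rule. */
#include <linux/device.h>
#include <linux/slab.h>

static void my_release(struct device *dev)
{
	kfree(dev);			/* the final free happens here */
}

static int my_register(struct device *parent)
{
	struct device *dev;
	int rc;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->parent = parent;
	dev->release = my_release;
	dev->init_name = "example0";

	rc = device_register(dev);
	if (rc) {
		/* Not kfree(): device_register() initialized the kobject,
		 * so drop the reference and let my_release() free it. */
		put_device(dev);
		return rc;
	}
	return 0;
}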