Import Upstream version 1.643
commit 385d03de81

@@ -0,0 +1,575 @@

/* vim: ts=8:sw=4:expandtab
|
||||
*
|
||||
* $Id$
|
||||
*
|
||||
* Copyright (c) 1994-2010 Tim Bunce Ireland
|
||||
*
|
||||
* See COPYRIGHT section in DBI.pm for usage and distribution rights.
|
||||
*/
|
||||
|
||||
/* DBI Interface Definitions for DBD Modules */
|
||||
|
||||
#ifndef DBIXS_VERSION /* prevent multiple inclusion */
|
||||
|
||||
#ifndef DBIS
|
||||
#define DBIS dbis /* default name for dbistate_t variable */
|
||||
#endif
|
||||
|
||||
/* Here for backwards compat. PERL_POLLUTE was removed in perl 5.13.3 */
|
||||
#define PERL_POLLUTE
|
||||
|
||||
/* first pull in the standard Perl header files for extensions */
|
||||
#include <EXTERN.h>
|
||||
#include <perl.h>
|
||||
#include <XSUB.h>
|
||||
|
||||
#ifdef debug /* causes problems with DBIS->debug */
|
||||
#undef debug
|
||||
#endif
|
||||
|
||||
#ifdef std /* causes problems with STLport <tscheresky@micron.com> */
|
||||
#undef std
|
||||
#endif
|
||||
|
||||
/* define DBIXS_REVISION */
|
||||
#include "dbixs_rev.h"
|
||||
|
||||
/* Perl backwards compatibility definitions */
|
||||
#define NEED_sv_2pv_flags
|
||||
#include "dbipport.h"
|
||||
|
||||
/* DBI SQL_* type definitions */
|
||||
#include "dbi_sql.h"
|
||||
|
||||
|
||||
#define DBIXS_VERSION 93 /* superseded by DBIXS_REVISION */
|
||||
|
||||
#ifdef NEED_DBIXS_VERSION
|
||||
#if NEED_DBIXS_VERSION > DBIXS_VERSION
|
||||
error You_need_to_upgrade_your_DBI_module_before_building_this_driver
|
||||
#endif
|
||||
#else
|
||||
#define NEED_DBIXS_VERSION DBIXS_VERSION
|
||||
#endif
|
||||
|
||||
|
||||
#define DBI_LOCK
|
||||
#define DBI_UNLOCK
|
||||
|
||||
#ifndef DBI_NO_THREADS
|
||||
#ifdef USE_ITHREADS
|
||||
#define DBI_USE_THREADS
|
||||
#endif /* USE_ITHREADS */
|
||||
#endif /* DBI_NO_THREADS */
|
||||
|
||||
|
||||
/* forward struct declarations */
|
||||
|
||||
typedef struct dbistate_st dbistate_t;
|
||||
/* implementor needs to define actual struct { dbih_??c_t com; ... }*/
|
||||
typedef struct imp_drh_st imp_drh_t; /* driver */
|
||||
typedef struct imp_dbh_st imp_dbh_t; /* database */
|
||||
typedef struct imp_sth_st imp_sth_t; /* statement */
|
||||
typedef struct imp_fdh_st imp_fdh_t; /* field descriptor */
|
||||
typedef struct imp_xxh_st imp_xxh_t; /* any (defined below) */
|
||||
#define DBI_imp_data_ imp_xxh_t /* friendly for take_imp_data */
|
||||
|
||||
|
||||
|
||||
/* --- DBI Handle Common Data Structure (all handles have one) --- */
|
||||
|
||||
/* Handle types. Code currently assumes child = parent + 1. */
|
||||
#define DBIt_DR 1
|
||||
#define DBIt_DB 2
|
||||
#define DBIt_ST 3
|
||||
#define DBIt_FD 4
|
||||
|
||||
/* component structures */
|
||||
|
||||
typedef struct dbih_com_std_st {
|
||||
U32 flags;
|
||||
int call_depth; /* used by DBI to track nested calls (int) */
|
||||
U16 type; /* DBIt_DR, DBIt_DB, DBIt_ST */
|
||||
HV *my_h; /* copy of outer handle HV (not refcounted) */
|
||||
SV *parent_h; /* parent inner handle (ref to hv) (r.c.inc) */
|
||||
imp_xxh_t *parent_com; /* parent com struct shortcut */
|
||||
PerlInterpreter * thr_user; /* thread that owns the handle */
|
||||
|
||||
HV *imp_stash; /* who is the implementor for this handle */
|
||||
SV *imp_data; /* optional implementors data (for perl imp's) */
|
||||
|
||||
I32 kids; /* count of db's for dr's, st's for db's etc */
|
||||
I32 active_kids; /* kids which are currently DBIc_ACTIVE */
|
||||
U32 pid; /* pid of process that created handle */
|
||||
dbistate_t *dbistate;
|
||||
} dbih_com_std_t;
|
||||
|
||||
typedef struct dbih_com_attr_st {
|
||||
/* These are copies of the Hash values (ref.cnt.inc'd) */
|
||||
/* Many of the hash values are themselves references */
|
||||
SV *TraceLevel;
|
||||
SV *State; /* Standard SQLSTATE, 5 char string */
|
||||
SV *Err; /* Native engine error code */
|
||||
SV *Errstr; /* Native engine error message */
|
||||
UV ErrCount;
|
||||
U32 LongReadLen; /* auto read length for long/blob types */
|
||||
SV *FetchHashKeyName; /* for fetchrow_hashref */
|
||||
/* (NEW FIELDS?... DON'T FORGET TO UPDATE dbih_clearcom()!) */
|
||||
} dbih_com_attr_t;
|
||||
|
||||
|
||||
struct dbih_com_st { /* complete core structure (typedef'd above) */
|
||||
dbih_com_std_t std;
|
||||
dbih_com_attr_t attr;
|
||||
};
|
||||
|
||||
/* The DBI defines this 'implementors' type by default as a way to refer */
/* to the imp_??h data of a handle without considering its type.         */
|
||||
struct imp_xxh_st { struct dbih_com_st com; };
|
||||
|
||||
/* Define handle-type specific structures for implementors to include */
|
||||
/* at the start of their private structures. */
|
||||
|
||||
typedef struct { /* -- DRIVER -- */
|
||||
dbih_com_std_t std;
|
||||
dbih_com_attr_t attr;
|
||||
HV *_old_cached_kids; /* not used, here for binary compat */
|
||||
} dbih_drc_t;
|
||||
|
||||
typedef struct { /* -- DATABASE -- */
|
||||
dbih_com_std_t std; /* \__ standard structure */
|
||||
dbih_com_attr_t attr; /* / plus... (nothing else right now) */
|
||||
HV *_old_cached_kids; /* not used, here for binary compat */
|
||||
} dbih_dbc_t;
|
||||
|
||||
typedef struct { /* -- STATEMENT -- */
|
||||
dbih_com_std_t std; /* \__ standard structure */
|
||||
dbih_com_attr_t attr; /* / plus ... */
|
||||
|
||||
int num_params; /* number of placeholders */
|
||||
int num_fields; /* NUM_OF_FIELDS, must be set */
|
||||
AV *fields_svav; /* special row buffer (inc bind_cols) */
|
||||
IV row_count; /* incremented by get_fbav() */
|
||||
|
||||
AV *fields_fdav; /* not used yet, may change */
|
||||
|
||||
I32 spare1;
|
||||
void *spare2;
|
||||
} dbih_stc_t;
|
||||
|
||||
|
||||
/* XXX THIS STRUCTURE SHOULD NOT BE USED */
|
||||
typedef struct { /* -- FIELD DESCRIPTOR -- */
|
||||
dbih_com_std_t std; /* standard structure (not fully setup) */
|
||||
|
||||
/* core attributes (from DescribeCol in ODBC) */
|
||||
char *col_name; /* see dbih_make_fdsv */
|
||||
I16 col_name_len;
|
||||
I16 col_sql_type;
|
||||
I16 col_precision;
|
||||
I16 col_scale;
|
||||
I16 col_nullable;
|
||||
|
||||
/* additional attributes (from ColAttributes in ODBC) */
|
||||
I32 col_length;
|
||||
I32 col_disp_size;
|
||||
|
||||
I32 spare1;
|
||||
void *spare2;
|
||||
} dbih_fdc_t;
|
||||
|
||||
|
||||
#define _imp2com(p,f) ((p)->com.f) /* private */
|
||||
|
||||
#define DBIc_FLAGS(imp) _imp2com(imp, std.flags)
|
||||
#define DBIc_TYPE(imp) _imp2com(imp, std.type)
|
||||
#define DBIc_CALL_DEPTH(imp) _imp2com(imp, std.call_depth)
|
||||
#define DBIc_MY_H(imp) _imp2com(imp, std.my_h)
|
||||
#define DBIc_PARENT_H(imp) _imp2com(imp, std.parent_h)
|
||||
#define DBIc_PARENT_COM(imp) _imp2com(imp, std.parent_com)
|
||||
#define DBIc_THR_COND(imp) _imp2com(imp, std.thr_cond)
|
||||
#define DBIc_THR_USER(imp) _imp2com(imp, std.thr_user)
|
||||
#define DBIc_THR_USER_NONE (0xFFFF)
|
||||
#define DBIc_IMP_STASH(imp) _imp2com(imp, std.imp_stash)
|
||||
#define DBIc_IMP_DATA(imp) _imp2com(imp, std.imp_data)
|
||||
#define DBIc_DBISTATE(imp) _imp2com(imp, std.dbistate)
|
||||
#define DBIc_LOGPIO(imp) DBIc_DBISTATE(imp)->logfp
|
||||
#define DBIc_KIDS(imp) _imp2com(imp, std.kids)
|
||||
#define DBIc_ACTIVE_KIDS(imp) _imp2com(imp, std.active_kids)
|
||||
#define DBIc_LAST_METHOD(imp) _imp2com(imp, std.last_method)
|
||||
|
||||
/* d = DBD flags, l = DBD level (needs to be shifted down)
 * D = DBI flags, r = reserved, L = DBI trace level
 * Trace level bit allocation: 0xddlDDDrL */
|
||||
#define DBIc_TRACE_LEVEL_MASK 0x0000000F
|
||||
#define DBIc_TRACE_FLAGS_MASK 0xFF0FFF00 /* includes DBD flag bits for DBIc_TRACE */
|
||||
#define DBIc_TRACE_SETTINGS(imp) (DBIc_DBISTATE(imp)->debug)
|
||||
#define DBIc_TRACE_LEVEL(imp) (DBIc_TRACE_SETTINGS(imp) & DBIc_TRACE_LEVEL_MASK)
|
||||
#define DBIc_TRACE_FLAGS(imp) (DBIc_TRACE_SETTINGS(imp) & DBIc_TRACE_FLAGS_MASK)
|
||||
/* DBI defined trace flags */
|
||||
#define DBIf_TRACE_SQL 0x00000100
|
||||
#define DBIf_TRACE_CON 0x00000200
|
||||
#define DBIf_TRACE_ENC 0x00000400
|
||||
#define DBIf_TRACE_DBD 0x00000800
|
||||
#define DBIf_TRACE_TXN 0x00001000
|
||||
|
||||
#define DBDc_TRACE_LEVEL_MASK 0x00F00000
|
||||
#define DBDc_TRACE_LEVEL_SHIFT 20
|
||||
#define DBDc_TRACE_LEVEL(imp) ( (DBIc_TRACE_SETTINGS(imp) & DBDc_TRACE_LEVEL_MASK) >> DBDc_TRACE_LEVEL_SHIFT )
|
||||
#define DBDc_TRACE_LEVEL_set(imp, l) ( DBIc_TRACE_SETTINGS(imp) |= (((l) << DBDc_TRACE_LEVEL_SHIFT) & DBDc_TRACE_LEVEL_MASK ))
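
/* Worked example (illustrative only, not from the original source): if
 * DBIc_TRACE_SETTINGS(imp) held 0x00300104 then, per the 0xddlDDDrL layout
 * above, DBDc_TRACE_LEVEL(imp) is (0x00300000 >> 20) == 3, the DBIf_TRACE_SQL
 * flag (0x00000100) is set, and DBIc_TRACE_LEVEL(imp) is 4.  A driver might
 * test it like this:
 *
 *     if (DBDc_TRACE_LEVEL(imp_dbh) >= 3 || DBIc_TRACE_LEVEL(imp_dbh) >= 4)
 *         PerlIO_printf(DBIc_LOGPIO(imp_dbh), "detailed tracing enabled\n");
 */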
|
||||
|
||||
/* DBIc_TRACE_MATCHES(this, crnt): true if this 'matches' (is within) crnt
|
||||
DBIc_TRACE_MATCHES(foo, DBIc_TRACE_SETTINGS(imp))
|
||||
*/
|
||||
#define DBIc_TRACE_MATCHES(this, crnt) \
|
||||
( ((crnt & DBIc_TRACE_LEVEL_MASK) >= (this & DBIc_TRACE_LEVEL_MASK)) \
|
||||
|| ((crnt & DBIc_TRACE_FLAGS_MASK) & (this & DBIc_TRACE_FLAGS_MASK)) )
|
||||
|
||||
/* DBIc_TRACE(imp, flags, flag_level, fallback_level)
|
||||
True if flags match the handle trace flags & handle trace level >= flag_level,
|
||||
OR if handle trace_level > fallback_level (typically > flag_level).
|
||||
This is the main trace testing macro to be used by drivers.
|
||||
(Drivers should define their own DBDf_TRACE_* macros for the top 8 bits: 0xFF000000)
|
||||
DBIc_TRACE(imp, 0, 0, 4) = if trace level >= 4
|
||||
DBIc_TRACE(imp, DBDf_TRACE_FOO, 2, 4) = if tracing DBDf_FOO & level>=2 or level>=4
|
||||
DBIc_TRACE(imp, DBDf_TRACE_FOO, 2, 0) = as above but never trace just due to level
|
||||
e.g.
|
||||
if (DBIc_TRACE(imp_xxh, DBIf_TRACE_SQL|DBIf_TRACE_xxx, 2, 0)) {
|
||||
PerlIO_printf(DBIc_LOGPIO(imp_sth), "\tThe %s wibbled the %s\n", ...);
|
||||
}
|
||||
*/
|
||||
#define DBIc_TRACE(imp, flags, flaglevel, level) \
|
||||
( (flags && (DBIc_TRACE_FLAGS(imp) & flags) && (DBIc_TRACE_LEVEL(imp) >= flaglevel)) \
|
||||
|| (level && DBIc_TRACE_LEVEL(imp) >= level) )
|
||||
|
||||
#define DBIc_DEBUG(imp) (_imp2com(imp, attr.TraceLevel)) /* deprecated */
|
||||
#define DBIc_DEBUGIV(imp) SvIV(DBIc_DEBUG(imp)) /* deprecated */
|
||||
#define DBIc_STATE(imp) SvRV(_imp2com(imp, attr.State))
|
||||
#define DBIc_ERR(imp) SvRV(_imp2com(imp, attr.Err))
|
||||
#define DBIc_ERRSTR(imp) SvRV(_imp2com(imp, attr.Errstr))
|
||||
#define DBIc_ErrCount(imp) _imp2com(imp, attr.ErrCount)
|
||||
#define DBIc_LongReadLen(imp) _imp2com(imp, attr.LongReadLen)
|
||||
#define DBIc_LongReadLen_init 80 /* may change */
|
||||
#define DBIc_FetchHashKeyName(imp) (_imp2com(imp, attr.FetchHashKeyName))
|
||||
|
||||
/* handle sub-type specific fields */
|
||||
/* dbh & drh */
|
||||
#define DBIc_CACHED_KIDS(imp) Nullhv /* no longer used, here for src compat */
|
||||
/* sth */
|
||||
#define DBIc_NUM_FIELDS(imp) _imp2com(imp, num_fields)
|
||||
#define DBIc_NUM_PARAMS(imp) _imp2com(imp, num_params)
|
||||
#define DBIc_NUM_PARAMS_AT_EXECUTE -9 /* see Driver.xst */
|
||||
#define DBIc_ROW_COUNT(imp) _imp2com(imp, row_count)
|
||||
#define DBIc_FIELDS_AV(imp) _imp2com(imp, fields_svav)
|
||||
#define DBIc_FDESC_AV(imp) _imp2com(imp, fields_fdav)
|
||||
#define DBIc_FDESC(imp, i) ((imp_fdh_t*)(void*)SvPVX(AvARRAY(DBIc_FDESC_AV(imp))[i]))
|
||||
|
||||
/* XXX --- DO NOT CHANGE THESE VALUES AS THEY ARE COMPILED INTO DRIVERS --- XXX */
|
||||
#define DBIcf_COMSET 0x000001 /* needs to be clear'd before free'd */
|
||||
#define DBIcf_IMPSET 0x000002 /* has implementor data to be clear'd */
|
||||
#define DBIcf_ACTIVE 0x000004 /* needs finish/disconnect before clear */
|
||||
#define DBIcf_IADESTROY 0x000008 /* do DBIc_ACTIVE_off before DESTROY */
|
||||
#define DBIcf_WARN 0x000010 /* warn about poor practice etc */
|
||||
#define DBIcf_COMPAT 0x000020 /* compat/emulation mode (eg oraperl) */
|
||||
#define DBIcf_ChopBlanks 0x000040 /* rtrim spaces from fetch char columns */
|
||||
#define DBIcf_RaiseError 0x000080 /* throw exception (croak) on error */
|
||||
#define DBIcf_PrintError 0x000100 /* warn() on error */
|
||||
#define DBIcf_AutoCommit 0x000200 /* dbh only. used by drivers */
|
||||
#define DBIcf_LongTruncOk 0x000400 /* truncation to LongReadLen is okay */
|
||||
#define DBIcf_MultiThread 0x000800 /* allow multiple threads to enter */
|
||||
#define DBIcf_HandleSetErr 0x001000 /* has coderef HandleSetErr attribute */
|
||||
#define DBIcf_ShowErrorStatement 0x002000 /* include Statement in error */
|
||||
#define DBIcf_BegunWork 0x004000 /* between begin_work & commit/rollback */
|
||||
#define DBIcf_HandleError 0x008000 /* has coderef in HandleError attribute */
|
||||
#define DBIcf_Profile 0x010000 /* profile activity on this handle */
|
||||
#define DBIcf_TaintIn 0x020000 /* check inputs for taintedness */
|
||||
#define DBIcf_TaintOut 0x040000 /* taint outgoing data */
|
||||
#define DBIcf_Executed 0x080000 /* do/execute called since commit/rollb */
|
||||
#define DBIcf_PrintWarn 0x100000 /* warn() on warning (err="0") */
|
||||
#define DBIcf_Callbacks 0x200000 /* has Callbacks attribute hash */
|
||||
#define DBIcf_AIADESTROY 0x400000 /* auto DBIcf_IADESTROY if pid changes */
|
||||
#define DBIcf_RaiseWarn 0x800000 /* throw exception (croak) on warn */
|
||||
/* NOTE: new flags may require clone() to be updated */
|
||||
|
||||
#define DBIcf_INHERITMASK /* what NOT to pass on to children */ \
|
||||
(U32)( DBIcf_COMSET | DBIcf_IMPSET | DBIcf_ACTIVE | DBIcf_IADESTROY \
|
||||
| DBIcf_AutoCommit | DBIcf_BegunWork | DBIcf_Executed | DBIcf_Callbacks )
|
||||
|
||||
/* general purpose bit setting and testing macros */
|
||||
#define DBIbf_is( bitset,flag) ((bitset) & (flag))
|
||||
#define DBIbf_has(bitset,flag) DBIbf_is(bitset, flag) /* alias for _is */
|
||||
#define DBIbf_on( bitset,flag) ((bitset) |= (flag))
|
||||
#define DBIbf_off(bitset,flag) ((bitset) &= ~(flag))
|
||||
#define DBIbf_set(bitset,flag,on) ((on) ? DBIbf_on(bitset, flag) : DBIbf_off(bitset,flag))
|
||||
|
||||
/* as above, but specifically for DBIc_FLAGS imp flags (except ACTIVE) */
|
||||
#define DBIc_is(imp, flag) DBIbf_is( DBIc_FLAGS(imp), flag)
|
||||
#define DBIc_has(imp,flag) DBIc_is(imp, flag) /* alias for DBIc_is */
|
||||
#define DBIc_on(imp, flag) DBIbf_on( DBIc_FLAGS(imp), flag)
|
||||
#define DBIc_off(imp,flag) DBIbf_off(DBIc_FLAGS(imp), flag)
|
||||
#define DBIc_set(imp,flag,on) DBIbf_set(DBIc_FLAGS(imp), flag, on)
|
||||
|
||||
#define DBIc_COMSET(imp) DBIc_is(imp, DBIcf_COMSET)
|
||||
#define DBIc_COMSET_on(imp) DBIc_on(imp, DBIcf_COMSET)
|
||||
#define DBIc_COMSET_off(imp) DBIc_off(imp,DBIcf_COMSET)
|
||||
|
||||
#define DBIc_IMPSET(imp) DBIc_is(imp, DBIcf_IMPSET)
|
||||
#define DBIc_IMPSET_on(imp) DBIc_on(imp, DBIcf_IMPSET)
|
||||
#define DBIc_IMPSET_off(imp) DBIc_off(imp,DBIcf_IMPSET)
|
||||
|
||||
#define DBIc_ACTIVE(imp) (DBIc_FLAGS(imp) & DBIcf_ACTIVE)
|
||||
#define DBIc_ACTIVE_on(imp) /* adjust parent's active kid count */ \
|
||||
do { \
|
||||
imp_xxh_t *ph_com = DBIc_PARENT_COM(imp); \
|
||||
if (!DBIc_ACTIVE(imp) && ph_com && !PL_dirty \
|
||||
&& ++DBIc_ACTIVE_KIDS(ph_com) > DBIc_KIDS(ph_com)) \
|
||||
croak("panic: DBI active kids (%ld) > kids (%ld)", \
|
||||
(long)DBIc_ACTIVE_KIDS(ph_com), \
|
||||
(long)DBIc_KIDS(ph_com)); \
|
||||
DBIc_FLAGS(imp) |= DBIcf_ACTIVE; \
|
||||
} while(0)
|
||||
#define DBIc_ACTIVE_off(imp) /* adjust parent's active kid count */ \
|
||||
do { \
|
||||
imp_xxh_t *ph_com = DBIc_PARENT_COM(imp); \
|
||||
if (DBIc_ACTIVE(imp) && ph_com && !PL_dirty \
|
||||
&& (--DBIc_ACTIVE_KIDS(ph_com) > DBIc_KIDS(ph_com) \
|
||||
|| DBIc_ACTIVE_KIDS(ph_com) < 0) ) \
|
||||
croak("panic: DBI active kids (%ld) < 0 or > kids (%ld)", \
|
||||
(long)DBIc_ACTIVE_KIDS(ph_com), \
|
||||
(long)DBIc_KIDS(ph_com)); \
|
||||
DBIc_FLAGS(imp) &= ~DBIcf_ACTIVE; \
|
||||
} while(0)
|
||||
|
||||
#define DBIc_IADESTROY(imp) (DBIc_FLAGS(imp) & DBIcf_IADESTROY)
|
||||
#define DBIc_IADESTROY_on(imp) (DBIc_FLAGS(imp) |= DBIcf_IADESTROY)
|
||||
#define DBIc_IADESTROY_off(imp) (DBIc_FLAGS(imp) &= ~DBIcf_IADESTROY)
|
||||
|
||||
#define DBIc_AIADESTROY(imp) (DBIc_FLAGS(imp) & DBIcf_AIADESTROY)
|
||||
#define DBIc_AIADESTROY_on(imp) (DBIc_FLAGS(imp) |= DBIcf_AIADESTROY)
|
||||
#define DBIc_AIADESTROY_off(imp) (DBIc_FLAGS(imp) &= ~DBIcf_AIADESTROY)
|
||||
|
||||
#define DBIc_WARN(imp) (DBIc_FLAGS(imp) & DBIcf_WARN)
|
||||
#define DBIc_WARN_on(imp) (DBIc_FLAGS(imp) |= DBIcf_WARN)
|
||||
#define DBIc_WARN_off(imp) (DBIc_FLAGS(imp) &= ~DBIcf_WARN)
|
||||
|
||||
#define DBIc_COMPAT(imp) (DBIc_FLAGS(imp) & DBIcf_COMPAT)
|
||||
#define DBIc_COMPAT_on(imp) (DBIc_FLAGS(imp) |= DBIcf_COMPAT)
|
||||
#define DBIc_COMPAT_off(imp) (DBIc_FLAGS(imp) &= ~DBIcf_COMPAT)
|
||||
|
||||
|
||||
#ifdef IN_DBI_XS /* get Handle Common Data Structure */
|
||||
#define DBIh_COM(h) (dbih_getcom2(aTHX_ h, 0))
|
||||
#else
|
||||
#define DBIh_COM(h) (DBIS->getcom(h))
|
||||
#define neatsvpv(sv,len) (DBIS->neat_svpv(sv,len))
|
||||
#endif
|
||||
|
||||
/* --- For sql_type_cast_svpv() --- */
|
||||
|
||||
#define DBIstcf_DISCARD_STRING 0x0001
|
||||
#define DBIstcf_STRICT 0x0002
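
/* Example usage (a sketch, not defined by this header): a driver that wants
 * to coerce a fetched value in place according to the column's SQL type can
 * call the dbistate_t sql_type_cast_svpv function pointer declared below;
 * 'sv' and the chosen SQL type here are placeholders supplied by the driver:
 *
 *     int rc = DBIc_DBISTATE(imp_sth)->sql_type_cast_svpv(aTHX_ sv,
 *                       SQL_INTEGER, DBIstcf_DISCARD_STRING, NULL);
 *
 * where rc reports the outcome of the cast.
 */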
|
||||
|
||||
/* --- Implementors Private Data Support --- */
|
||||
|
||||
#define D_impdata(name,type,h) type *name = (type*)(DBIh_COM(h))
|
||||
#define D_imp_drh(h) D_impdata(imp_drh, imp_drh_t, h)
|
||||
#define D_imp_dbh(h) D_impdata(imp_dbh, imp_dbh_t, h)
|
||||
#define D_imp_sth(h) D_impdata(imp_sth, imp_sth_t, h)
|
||||
#define D_imp_xxh(h) D_impdata(imp_xxh, imp_xxh_t, h)
|
||||
|
||||
#define D_imp_from_child(name,type,child) \
|
||||
type *name = (type*)(DBIc_PARENT_COM(child))
|
||||
#define D_imp_drh_from_dbh D_imp_from_child(imp_drh, imp_drh_t, imp_dbh)
|
||||
#define D_imp_dbh_from_sth D_imp_from_child(imp_dbh, imp_dbh_t, imp_sth)
|
||||
|
||||
#define DBI_IMP_SIZE(n,s) sv_setiv(get_sv((n), GV_ADDMULTI), (s)) /* XXX */
|
||||
|
||||
|
||||
|
||||
/* --- Event Support (VERY LIABLE TO CHANGE) --- */
|
||||
|
||||
#define DBIh_EVENTx(h,t,a1,a2) /* deprecated XXX */ &PL_sv_no
|
||||
#define DBIh_EVENT0(h,t) DBIh_EVENTx((h), (t), &PL_sv_undef, &PL_sv_undef)
|
||||
#define DBIh_EVENT1(h,t, a1) DBIh_EVENTx((h), (t), (a1), &PL_sv_undef)
|
||||
#define DBIh_EVENT2(h,t, a1,a2) DBIh_EVENTx((h), (t), (a1), (a2))
|
||||
|
||||
#define ERROR_event "ERROR"
|
||||
#define WARN_event "WARN"
|
||||
#define MSG_event "MESSAGE"
|
||||
#define DBEVENT_event "DBEVENT"
|
||||
#define UNKNOWN_event "UNKNOWN"
|
||||
|
||||
#define DBIh_SET_ERR_SV(h,i, err, errstr, state, method) \
|
||||
(DBIc_DBISTATE(i)->set_err_sv(h,i, err, errstr, state, method))
|
||||
#define DBIh_SET_ERR_CHAR(h,i, err_c, err_i, errstr, state, method) \
|
||||
(DBIc_DBISTATE(i)->set_err_char(h,i, err_c, err_i, errstr, state, method))
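
/* Example usage (a sketch, not part of the original header): a statement
 * implementation would typically report an error roughly like this, where
 * sth/imp_sth come from the surrounding XS code and the message text is an
 * assumption for the example:
 *
 *     DBIh_SET_ERR_CHAR(sth, (imp_xxh_t*)imp_sth,
 *                       Nullch, 1, "example driver error", Nullch, Nullch);
 */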
|
||||
|
||||
|
||||
/* --- Handy Macros --- */
|
||||
|
||||
#define DBIh_CLEAR_ERROR(imp_xxh) (void)( \
|
||||
(void)SvOK_off(DBIc_ERR(imp_xxh)), \
|
||||
(void)SvOK_off(DBIc_ERRSTR(imp_xxh)), \
|
||||
(void)SvOK_off(DBIc_STATE(imp_xxh)) \
|
||||
)
|
||||
|
||||
|
||||
/* --- DBI State Structure --- */
|
||||
|
||||
struct dbistate_st {
|
||||
|
||||
/* DBISTATE_VERSION is checked at runtime via DBISTATE_INIT and check_version.
 * It should be incremented on incompatible changes to the dbistate_t structure.
 * Assigning additional function pointers from spare padding, where the size
 * of the structure doesn't change, doesn't require an increment.
 * Incrementing forces all XS drivers to be recompiled.
 * (See also DBIXS_REVISION as a driver source compatibility tool.)
 */
|
||||
#define DBISTATE_VERSION 94 /* ++ on incompatible dbistate_t changes */
|
||||
|
||||
/* this must be the first member in structure */
|
||||
void (*check_version) _((const char *name,
|
||||
int dbis_cv, int dbis_cs, int need_dbixs_cv,
|
||||
int drc_s, int dbc_s, int stc_s, int fdc_s));
|
||||
|
||||
/* version and size are used to check for DBI/DBD version mis-match */
|
||||
U16 version; /* version of this structure */
|
||||
U16 size;
|
||||
U16 xs_version; /* version of the overall DBIXS / DBD interface */
|
||||
U16 spare_pad;
|
||||
|
||||
I32 debug;
|
||||
PerlIO *logfp;
|
||||
|
||||
/* pointers to DBI functions which the DBD's will want to use */
|
||||
char * (*neat_svpv) _((SV *sv, STRLEN maxlen));
|
||||
imp_xxh_t * (*getcom) _((SV *h)); /* see DBIh_COM macro */
|
||||
void (*clearcom) _((imp_xxh_t *imp_xxh));
|
||||
SV * (*event) _((SV *h, const char *name, SV*, SV*));
|
||||
int (*set_attr_k) _((SV *h, SV *keysv, int dbikey, SV *valuesv));
|
||||
SV * (*get_attr_k) _((SV *h, SV *keysv, int dbikey));
|
||||
AV * (*get_fbav) _((imp_sth_t *imp_sth));
|
||||
SV * (*make_fdsv) _((SV *sth, const char *imp_class, STRLEN imp_size, const char *col_name));
|
||||
int (*bind_as_num) _((int sql_type, int p, int s, int *t, void *v)); /* XXX deprecated */
|
||||
I32 (*hash) _((const char *string, long i));
|
||||
SV * (*preparse) _((SV *sth, char *statement, IV ps_return, IV ps_accept, void *foo));
|
||||
|
||||
SV *neatsvpvlen; /* only show dbgpvlen chars when debugging pv's */
|
||||
|
||||
PerlInterpreter * thr_owner; /* thread that owns this dbistate */
|
||||
|
||||
int (*logmsg) _((imp_xxh_t *imp_xxh, const char *fmt, ...));
|
||||
int (*set_err_sv) _((SV *h, imp_xxh_t *imp_xxh, SV *err, SV *errstr, SV *state, SV *method));
|
||||
int (*set_err_char) _((SV *h, imp_xxh_t *imp_xxh, const char *err, IV err_i, const char *errstr, const char *state, const char *method));
|
||||
int (*bind_col) _((SV *sth, SV *col, SV *ref, SV *attribs));
|
||||
|
||||
IO *logfp_ref; /* keep ptr to filehandle for refcounting */
|
||||
|
||||
int (*sql_type_cast_svpv) _((pTHX_ SV *sv, int sql_type, U32 flags, void *v));
|
||||
|
||||
/* WARNING: Only add new structure members here, and reduce pad2 to keep */
|
||||
/* the memory footprint exactly the same */
|
||||
void *pad2[3];
|
||||
};
|
||||
|
||||
/* macros for backwards compatibility */
|
||||
#define set_attr(h, k, v) set_attr_k(h, k, 0, v)
|
||||
#define get_attr(h, k) get_attr_k(h, k, 0)
|
||||
|
||||
#define DBILOGFP (DBIS->logfp)
|
||||
#ifdef IN_DBI_XS
|
||||
#define DBILOGMSG (dbih_logmsg)
|
||||
#else
|
||||
#define DBILOGMSG (DBIS->logmsg)
|
||||
#endif
|
||||
|
||||
/* --- perl object (ActiveState) / multiplicity hooks and hoops --- */
|
||||
/* note that USE_ITHREADS implies MULTIPLICITY */
|
||||
|
||||
typedef dbistate_t** (*_dbi_state_lval_t)(pTHX);
|
||||
|
||||
# define _DBISTATE_DECLARE_COMMON \
|
||||
static _dbi_state_lval_t dbi_state_lval_p = 0; \
|
||||
static dbistate_t** dbi_get_state(pTHX) { \
|
||||
if (!dbi_state_lval_p) { \
|
||||
CV *cv = get_cv("DBI::_dbi_state_lval", 0); \
|
||||
if (!cv) \
|
||||
croak("Unable to get DBI state function. DBI not loaded."); \
|
||||
dbi_state_lval_p = (_dbi_state_lval_t)CvXSUB(cv); \
|
||||
} \
|
||||
return dbi_state_lval_p(aTHX); \
|
||||
} \
|
||||
typedef int dummy_dbistate /* keep semicolon from feeling lonely */
|
||||
|
||||
#if defined(MULTIPLICITY) || defined(PERL_OBJECT) || defined(PERL_CAPI)
|
||||
|
||||
# define DBISTATE_DECLARE _DBISTATE_DECLARE_COMMON
|
||||
# define _DBISTATE_INIT_DBIS
|
||||
# undef DBIS
|
||||
# define DBIS (*dbi_get_state(aTHX))
|
||||
# define dbis DBIS /* temp for old drivers using 'dbis' instead of 'DBIS' */
|
||||
|
||||
#else /* plain and simple non perl object / multiplicity case */
|
||||
|
||||
# define DBISTATE_DECLARE \
|
||||
static dbistate_t *DBIS; \
|
||||
_DBISTATE_DECLARE_COMMON
|
||||
|
||||
# define _DBISTATE_INIT_DBIS DBIS = *dbi_get_state(aTHX);
|
||||
#endif
|
||||
|
||||
# define DBISTATE_INIT { /* typically use in BOOT: of XS file */ \
|
||||
_DBISTATE_INIT_DBIS \
|
||||
if (DBIS == NULL) \
|
||||
croak("Unable to get DBI state. DBI not loaded."); \
|
||||
DBIS->check_version(__FILE__, DBISTATE_VERSION, sizeof(*DBIS), NEED_DBIXS_VERSION, \
|
||||
sizeof(dbih_drc_t), sizeof(dbih_dbc_t), sizeof(dbih_stc_t), sizeof(dbih_fdc_t) \
|
||||
); \
|
||||
}
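
/* Illustrative sketch (not part of this header): a driver's XS code normally
 * declares and initialises the DBI state roughly as below; the DBD::Example
 * name and the dbd_init() call are assumptions mirroring the Driver.xst
 * template shipped alongside this header:
 *
 *     #include "DBIXS.h"
 *     DBISTATE_DECLARE;
 *
 *     MODULE = DBD::Example   PACKAGE = DBD::Example
 *
 *     BOOT:
 *         DBISTATE_INIT;
 *         dbd_init(DBIS);
 */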
|
||||
|
||||
|
||||
/* --- Assorted Utility Macros --- */
|
||||
|
||||
#define DBD_ATTRIB_OK(attribs) /* is this a usable attrib value */ \
|
||||
(attribs && SvROK(attribs) && SvTYPE(SvRV(attribs))==SVt_PVHV)
|
||||
|
||||
/* If attribs value supplied then croak if it's not a hash ref. */
|
||||
/* Also map undef to Null. Should always be called to pre-process the */
|
||||
/* attribs value. One day we may add some extra magic in here. */
|
||||
#define DBD_ATTRIBS_CHECK(func, h, attribs) \
|
||||
if ((attribs) && SvOK(attribs)) { \
|
||||
if (!SvROK(attribs) || SvTYPE(SvRV(attribs))!=SVt_PVHV) \
|
||||
croak("%s->%s(...): attribute parameter '%s' is not a hash ref", \
|
||||
SvPV_nolen(h), func, SvPV_nolen(attribs)); \
|
||||
} else (attribs) = Nullsv
|
||||
|
||||
#define DBD_ATTRIB_GET_SVP(attribs, key,klen) \
|
||||
(DBD_ATTRIB_OK(attribs) \
|
||||
? hv_fetch((HV*)SvRV(attribs), key,klen, 0) \
|
||||
: (SV **)Nullsv)
|
||||
|
||||
#define DBD_ATTRIB_GET_IV(attribs, key,klen, svp, var) \
|
||||
if ((svp=DBD_ATTRIB_GET_SVP(attribs, key,klen)) != NULL) \
|
||||
var = SvIV(*svp)
|
||||
|
||||
#define DBD_ATTRIB_GET_UV(attribs, key,klen, svp, var) \
|
||||
if ((svp=DBD_ATTRIB_GET_SVP(attribs, key,klen)) != NULL) \
|
||||
var = SvUV(*svp)
|
||||
|
||||
#define DBD_ATTRIB_GET_BOOL(attribs, key,klen, svp, var) \
|
||||
if ((svp=DBD_ATTRIB_GET_SVP(attribs, key,klen)) != NULL) \
|
||||
var = SvTRUE(*svp)
|
||||
|
||||
#define DBD_ATTRIB_TRUE(attribs, key,klen, svp) \
|
||||
( ((svp=DBD_ATTRIB_GET_SVP(attribs, key,klen)) != NULL) \
|
||||
? SvTRUE(*svp) : 0 )
|
||||
|
||||
#define DBD_ATTRIB_GET_PV(attribs, key,klen, svp, dflt) \
|
||||
(((svp=DBD_ATTRIB_GET_SVP(attribs, key,klen)) != NULL) \
|
||||
? SvPV_nolen(*svp) : (dflt))
|
||||
|
||||
#define DBD_ATTRIB_DELETE(attribs, key, klen) \
|
||||
hv_delete((HV*)SvRV(attribs), key, klen, G_DISCARD)
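
/* Example usage (a sketch, not part of the original header): inside an XS
 * method that received an 'attribs' hash ref, a driver could read an integer
 * attribute like this; the "RowCacheSize" name and variable names are
 * assumptions for the example only:
 *
 *     SV **svp;
 *     IV cache_size = 0;
 *     DBD_ATTRIBS_CHECK("connect", dbh, attribs);
 *     DBD_ATTRIB_GET_IV(attribs, "RowCacheSize", 12, svp, cache_size);
 */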
|
||||
|
||||
#endif /* DBIXS_VERSION */
|
||||
/* end of DBIXS.h */

@@ -0,0 +1,851 @@

# $Id$
|
||||
# Copyright (c) 1997-2002 Tim Bunce Ireland
|
||||
# Copyright (c) 2002 Jonathan Leffler
|
||||
#
|
||||
# You may distribute under the terms of either the GNU General Public
|
||||
# License or the Artistic License, as specified in the Perl README file.
|
||||
|
||||
|
||||
#include "Driver_xst.h"
|
||||
|
||||
# Historically dbd_db_do4, dbd_st_execute, and dbd_st_rows returned an 'int' type.
# That's only 32 bits (31+sign), so it isn't sufficient for very large row counts.
# So now, instead of defining those macros, drivers can define dbd_db_do4_iv,
# dbd_st_execute_iv, and dbd_st_rows_iv to be the names of functions that
# return an 'IV' type. They could also set DBIc_ROW_COUNT(imp_sth).
#
# To save a mess of #ifdef's we arrange for dbd_st_execute (etc) to work
# as dbd_st_execute_iv if that's defined.
#
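# For illustration only (this sketch is not part of the template): a driver
# that wants IV row counts might add, in its own dbdimp.h (the names here are
# hypothetical), a line such as
#     "#define dbd_st_execute_iv  drv_st_execute_iv"
# and implement the function in dbdimp.c with a signature like
#     IV drv_st_execute_iv _((SV *sth, imp_sth_t *imp_sth));
# The mappings just below then alias dbd_st_execute to it.
#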
|
||||
#if defined(dbd_st_execute_iv)
|
||||
#undef dbd_st_execute
|
||||
#define dbd_st_execute dbd_st_execute_iv
|
||||
#endif
|
||||
#if defined(dbd_st_rows_iv)
|
||||
#undef dbd_st_rows
|
||||
#define dbd_st_rows dbd_st_rows_iv
|
||||
#endif
|
||||
#if defined(dbd_db_do4_iv)
|
||||
#undef dbd_db_do4
|
||||
#define dbd_db_do4 dbd_db_do4_iv
|
||||
#endif
|
||||
|
||||
MODULE = DBD::~DRIVER~ PACKAGE = DBD::~DRIVER~
|
||||
|
||||
REQUIRE: 1.929
|
||||
PROTOTYPES: DISABLE
|
||||
|
||||
BOOT:
|
||||
PERL_UNUSED_VAR(items);
|
||||
DBISTATE_INIT;
|
||||
/* XXX this interface will change: */
|
||||
DBI_IMP_SIZE("DBD::~DRIVER~::dr::imp_data_size", sizeof(imp_drh_t));
|
||||
DBI_IMP_SIZE("DBD::~DRIVER~::db::imp_data_size", sizeof(imp_dbh_t));
|
||||
DBI_IMP_SIZE("DBD::~DRIVER~::st::imp_data_size", sizeof(imp_sth_t));
|
||||
dbd_init(DBIS);
|
||||
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# driver level interface
|
||||
# ------------------------------------------------------------
|
||||
MODULE = DBD::~DRIVER~ PACKAGE = DBD::~DRIVER~::dr
|
||||
|
||||
|
||||
void
|
||||
dbixs_revision(...)
|
||||
PPCODE:
|
||||
ST(0) = sv_2mortal(newSViv(DBIXS_REVISION));
|
||||
|
||||
|
||||
#ifdef dbd_discon_all
|
||||
|
||||
# disconnect_all renamed and ALIAS'd to avoid length clash on VMS :-(
|
||||
bool
|
||||
discon_all_(drh)
|
||||
SV * drh
|
||||
ALIAS:
|
||||
disconnect_all = 1
|
||||
CODE:
|
||||
D_imp_drh(drh);
|
||||
PERL_UNUSED_VAR(ix);
|
||||
RETVAL = dbd_discon_all(drh, imp_drh);
|
||||
OUTPUT:
|
||||
RETVAL
|
||||
|
||||
#endif /* dbd_discon_all */
|
||||
|
||||
|
||||
#ifdef dbd_dr_data_sources
|
||||
|
||||
void
|
||||
data_sources(drh, attr = Nullsv)
|
||||
SV *drh
|
||||
SV *attr
|
||||
PPCODE:
|
||||
{
|
||||
D_imp_drh(drh);
|
||||
AV *av = dbd_dr_data_sources(drh, imp_drh, attr);
|
||||
if (av) {
|
||||
int i;
|
||||
int n = AvFILL(av)+1;
|
||||
EXTEND(sp, n);
|
||||
for (i = 0; i < n; ++i) {
|
||||
PUSHs(AvARRAY(av)[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# database level interface
|
||||
# ------------------------------------------------------------
|
||||
MODULE = DBD::~DRIVER~ PACKAGE = DBD::~DRIVER~::db
|
||||
|
||||
|
||||
bool
|
||||
_login(dbh, dbname, username, password, attribs=Nullsv)
|
||||
SV * dbh
|
||||
SV * dbname
|
||||
SV * username
|
||||
SV * password
|
||||
SV * attribs
|
||||
CODE:
|
||||
{
|
||||
D_imp_dbh(dbh);
|
||||
#if !defined(dbd_db_login6_sv)
|
||||
STRLEN lna;
|
||||
char *u = (SvOK(username)) ? SvPV(username,lna) : (char*)"";
|
||||
char *p = (SvOK(password)) ? SvPV(password,lna) : (char*)"";
|
||||
#endif
|
||||
#ifdef dbd_db_login6_sv
|
||||
RETVAL = dbd_db_login6_sv(dbh, imp_dbh, dbname, username, password, attribs);
|
||||
#elif defined(dbd_db_login6)
|
||||
RETVAL = dbd_db_login6(dbh, imp_dbh, SvPV_nolen(dbname), u, p, attribs);
|
||||
#else
|
||||
PERL_UNUSED_ARG(attribs);
|
||||
RETVAL = dbd_db_login( dbh, imp_dbh, SvPV_nolen(dbname), u, p);
|
||||
#endif
|
||||
}
|
||||
OUTPUT:
|
||||
RETVAL
|
||||
|
||||
|
||||
void
|
||||
selectall_arrayref(...)
|
||||
PREINIT:
|
||||
SV *sth;
|
||||
SV **maxrows_svp;
|
||||
SV **tmp_svp;
|
||||
SV *tmp_sv;
|
||||
SV *attr = &PL_sv_undef;
|
||||
imp_sth_t *imp_sth;
|
||||
CODE:
|
||||
if (items > 2) {
|
||||
attr = ST(2);
|
||||
if (SvROK(attr) &&
|
||||
(DBD_ATTRIB_TRUE(attr,"Slice",5,tmp_svp) || DBD_ATTRIB_TRUE(attr,"Columns",7,tmp_svp))
|
||||
) {
|
||||
/* fallback to perl implementation */
|
||||
SV *tmp =dbixst_bounce_method("DBD::~DRIVER~::db::SUPER::selectall_arrayref", items);
|
||||
SPAGAIN;
|
||||
ST(0) = tmp;
|
||||
XSRETURN(1);
|
||||
}
|
||||
}
|
||||
/* --- prepare --- */
|
||||
if (SvROK(ST(1))) {
|
||||
MAGIC *mg;
|
||||
sth = ST(1);
|
||||
/* switch to inner handle if not already */
|
||||
if ( (mg = mg_find(SvRV(sth),'P')) )
|
||||
sth = mg->mg_obj;
|
||||
}
|
||||
else {
|
||||
sth = dbixst_bounce_method("prepare", 3);
|
||||
SPAGAIN; SP -= items; /* because stack might have been realloc'd */
|
||||
if (!SvROK(sth))
|
||||
XSRETURN_UNDEF;
|
||||
/* switch to inner handle */
|
||||
sth = mg_find(SvRV(sth),'P')->mg_obj;
|
||||
}
|
||||
imp_sth = (imp_sth_t*)(DBIh_COM(sth));
|
||||
/* --- bind_param --- */
|
||||
if (items > 3) { /* need to bind params before execute */
|
||||
if (!dbdxst_bind_params(sth, imp_sth, items-2, ax+2) ) {
|
||||
XSRETURN_UNDEF;
|
||||
}
|
||||
}
|
||||
/* --- execute --- */
|
||||
DBIc_ROW_COUNT(imp_sth) = 0;
|
||||
if ( dbd_st_execute(sth, imp_sth) <= -2 ) { /* -2 == error */
|
||||
XSRETURN_UNDEF;
|
||||
}
|
||||
/* --- fetchall --- */
|
||||
maxrows_svp = DBD_ATTRIB_GET_SVP(attr, "MaxRows", 7);
|
||||
tmp_sv = dbdxst_fetchall_arrayref(sth, &PL_sv_undef, (maxrows_svp) ? *maxrows_svp : &PL_sv_undef);
|
||||
SPAGAIN;
|
||||
ST(0) = tmp_sv;
|
||||
|
||||
|
||||
void
|
||||
selectrow_arrayref(...)
|
||||
ALIAS:
|
||||
selectrow_array = 1
|
||||
PREINIT:
|
||||
int is_selectrow_array = (ix == 1);
|
||||
imp_sth_t *imp_sth;
|
||||
SV *sth;
|
||||
AV *row_av;
|
||||
PPCODE:
|
||||
if (SvROK(ST(1))) {
|
||||
MAGIC *mg;
|
||||
sth = ST(1);
|
||||
/* switch to inner handle if not already */
|
||||
if ( (mg = mg_find(SvRV(sth),'P')) )
|
||||
sth = mg->mg_obj;
|
||||
}
|
||||
else {
|
||||
/* --- prepare --- */
|
||||
sth = dbixst_bounce_method("prepare", 3);
|
||||
SPAGAIN; SP -= items; /* because stack might have been realloc'd */
|
||||
if (!SvROK(sth)) {
|
||||
if (is_selectrow_array) { XSRETURN_EMPTY; } else { XSRETURN_UNDEF; }
|
||||
}
|
||||
/* switch to inner handle */
|
||||
sth = mg_find(SvRV(sth),'P')->mg_obj;
|
||||
}
|
||||
imp_sth = (imp_sth_t*)(DBIh_COM(sth));
|
||||
/* --- bind_param --- */
|
||||
if (items > 3) { /* need to bind params before execute */
|
||||
if (!dbdxst_bind_params(sth, imp_sth, items-2, ax+2) ) {
|
||||
if (is_selectrow_array) { XSRETURN_EMPTY; } else { XSRETURN_UNDEF; }
|
||||
}
|
||||
}
|
||||
/* --- execute --- */
|
||||
DBIc_ROW_COUNT(imp_sth) = 0;
|
||||
if ( dbd_st_execute(sth, imp_sth) <= -2 ) { /* -2 == error */
|
||||
if (is_selectrow_array) { XSRETURN_EMPTY; } else { XSRETURN_UNDEF; }
|
||||
}
|
||||
/* --- fetchrow_arrayref --- */
|
||||
row_av = dbd_st_fetch(sth, imp_sth);
|
||||
if (!row_av) {
|
||||
if (GIMME == G_SCALAR)
|
||||
PUSHs(&PL_sv_undef);
|
||||
}
|
||||
else if (is_selectrow_array) {
|
||||
int i;
|
||||
int num_fields = AvFILL(row_av)+1;
|
||||
if (GIMME == G_SCALAR)
|
||||
num_fields = 1; /* return just first field */
|
||||
EXTEND(sp, num_fields);
|
||||
for(i=0; i < num_fields; ++i) {
|
||||
PUSHs(AvARRAY(row_av)[i]);
|
||||
}
|
||||
}
|
||||
else {
|
||||
PUSHs( sv_2mortal(newRV((SV *)row_av)) );
|
||||
}
|
||||
/* --- finish --- */
|
||||
#ifdef dbd_st_finish3
|
||||
dbd_st_finish3(sth, imp_sth, 0);
|
||||
#else
|
||||
dbd_st_finish(sth, imp_sth);
|
||||
#endif
|
||||
|
||||
|
||||
#if defined(dbd_db_do6) || defined(dbd_db_do4)
|
||||
|
||||
void
|
||||
do(dbh, statement, params = Nullsv, ...)
|
||||
SV * dbh
|
||||
SV * statement
|
||||
SV * params
|
||||
CODE:
|
||||
{
|
||||
D_imp_dbh(dbh);
|
||||
IV retval;
|
||||
#ifdef dbd_db_do6
|
||||
/* items is the number of arguments passed to the XSUB, supplied by the xsubpp compiler */
/* ax contains the stack base offset used by the ST() macro, supplied by the xsubpp compiler */
|
||||
I32 offset = (items >= 3) ? 3 : items;
|
||||
retval = dbd_db_do6(dbh, imp_dbh, statement, params, items-offset, ax+offset);
|
||||
#else
|
||||
if (items > 3)
|
||||
croak_xs_usage(cv, "dbh, statement, params = Nullsv");
|
||||
retval = dbd_db_do4(dbh, imp_dbh, SvPV_nolen(statement), params); /* might be dbd_db_do4_iv via macro */
|
||||
#endif
|
||||
/* remember that dbd_db_do* must return <= -2 for error */
|
||||
if (retval == 0) /* ok with no rows affected */
|
||||
XST_mPV(0, "0E0"); /* (true but zero) */
|
||||
else if (retval < -1) /* -1 == unknown number of rows */
|
||||
XST_mUNDEF(0); /* <= -2 means error */
|
||||
else
|
||||
XST_mIV(0, retval); /* typically 1, rowcount or -1 */
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef dbd_db_last_insert_id
|
||||
|
||||
void
|
||||
last_insert_id(dbh, catalog=&PL_sv_undef, schema=&PL_sv_undef, table=&PL_sv_undef, field=&PL_sv_undef, attr=Nullsv)
|
||||
SV * dbh
|
||||
SV * catalog
|
||||
SV * schema
|
||||
SV * table
|
||||
SV * field
|
||||
SV * attr
|
||||
CODE:
|
||||
{
|
||||
D_imp_dbh(dbh);
|
||||
SV *ret = dbd_db_last_insert_id(dbh, imp_dbh, catalog, schema, table, field, attr);
|
||||
ST(0) = ret;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
bool
|
||||
commit(dbh)
|
||||
SV * dbh
|
||||
CODE:
|
||||
D_imp_dbh(dbh);
|
||||
if (DBIc_has(imp_dbh,DBIcf_AutoCommit) && DBIc_WARN(imp_dbh))
|
||||
warn("commit ineffective with AutoCommit enabled");
|
||||
RETVAL = dbd_db_commit(dbh, imp_dbh);
|
||||
OUTPUT:
|
||||
RETVAL
|
||||
|
||||
|
||||
bool
|
||||
rollback(dbh)
|
||||
SV * dbh
|
||||
CODE:
|
||||
D_imp_dbh(dbh);
|
||||
if (DBIc_has(imp_dbh,DBIcf_AutoCommit) && DBIc_WARN(imp_dbh))
|
||||
warn("rollback ineffective with AutoCommit enabled");
|
||||
RETVAL = dbd_db_rollback(dbh, imp_dbh);
|
||||
OUTPUT:
|
||||
RETVAL
|
||||
|
||||
|
||||
bool
|
||||
disconnect(dbh)
|
||||
SV * dbh
|
||||
CODE:
|
||||
D_imp_dbh(dbh);
|
||||
if ( !DBIc_ACTIVE(imp_dbh) ) {
|
||||
XSRETURN_YES;
|
||||
}
|
||||
/* Check for disconnect() being called whilst refs to cursors */
/* still exist. This possibly needs some more thought.        */
|
||||
if (DBIc_ACTIVE_KIDS(imp_dbh) && DBIc_WARN(imp_dbh) && !PL_dirty) {
|
||||
STRLEN lna;
|
||||
char *plural = (DBIc_ACTIVE_KIDS(imp_dbh)==1) ? (char*)"" : (char*)"s";
|
||||
warn("%s->disconnect invalidates %d active statement handle%s %s",
|
||||
SvPV(dbh,lna), (int)DBIc_ACTIVE_KIDS(imp_dbh), plural,
|
||||
"(either destroy statement handles or call finish on them before disconnecting)");
|
||||
}
|
||||
RETVAL = dbd_db_disconnect(dbh, imp_dbh);
|
||||
DBIc_ACTIVE_off(imp_dbh); /* ensure it's off, regardless */
|
||||
OUTPUT:
|
||||
RETVAL
|
||||
|
||||
|
||||
void
|
||||
STORE(dbh, keysv, valuesv)
|
||||
SV * dbh
|
||||
SV * keysv
|
||||
SV * valuesv
|
||||
CODE:
|
||||
D_imp_dbh(dbh);
|
||||
if (SvGMAGICAL(valuesv))
|
||||
mg_get(valuesv);
|
||||
ST(0) = &PL_sv_yes;
|
||||
if (!dbd_db_STORE_attrib(dbh, imp_dbh, keysv, valuesv))
|
||||
if (!DBIc_DBISTATE(imp_dbh)->set_attr(dbh, keysv, valuesv))
|
||||
ST(0) = &PL_sv_no;
|
||||
|
||||
|
||||
void
|
||||
FETCH(dbh, keysv)
|
||||
SV * dbh
|
||||
SV * keysv
|
||||
CODE:
|
||||
D_imp_dbh(dbh);
|
||||
SV *valuesv = dbd_db_FETCH_attrib(dbh, imp_dbh, keysv);
|
||||
if (!valuesv)
|
||||
valuesv = DBIc_DBISTATE(imp_dbh)->get_attr(dbh, keysv);
|
||||
ST(0) = valuesv; /* dbd_db_FETCH_attrib did sv_2mortal */
|
||||
|
||||
|
||||
void
|
||||
DESTROY(dbh)
|
||||
SV * dbh
|
||||
PPCODE:
|
||||
/* keep in sync with default DESTROY in DBI.xs */
|
||||
D_imp_dbh(dbh);
|
||||
ST(0) = &PL_sv_yes;
|
||||
if (!DBIc_IMPSET(imp_dbh)) { /* was never fully set up */
|
||||
STRLEN lna;
|
||||
if (DBIc_WARN(imp_dbh) && !PL_dirty && DBIc_DBISTATE(imp_dbh)->debug >= 2)
|
||||
PerlIO_printf(DBIc_LOGPIO(imp_dbh),
|
||||
" DESTROY for %s ignored - handle not initialised\n",
|
||||
SvPV(dbh,lna));
|
||||
}
|
||||
else {
|
||||
if (DBIc_IADESTROY(imp_dbh)) { /* wants ineffective destroy */
|
||||
DBIc_ACTIVE_off(imp_dbh);
|
||||
if (DBIc_DBISTATE(imp_dbh)->debug)
|
||||
PerlIO_printf(DBIc_LOGPIO(imp_dbh), " DESTROY %s skipped due to InactiveDestroy\n", SvPV_nolen(dbh));
|
||||
}
|
||||
if (DBIc_ACTIVE(imp_dbh)) {
|
||||
if (!DBIc_has(imp_dbh,DBIcf_AutoCommit)) {
|
||||
/* Application is using transactions and hasn't explicitly disconnected.
|
||||
Some databases will automatically commit on graceful disconnect.
|
||||
Since we're about to gracefully disconnect as part of the DESTROY
|
||||
we want to be sure we're not about to implicitly commit changes
|
||||
that are incomplete and should be rolled back. (The DESTROY may
|
||||
be due to a RaiseError, for example.) So we rollback here.
|
||||
This will be harmless if the application has issued a commit,
|
||||
XXX Could add an attribute flag to indicate that the driver
|
||||
doesn't have this problem. Patches welcome.
|
||||
*/
|
||||
if (DBIc_WARN(imp_dbh) /* only warn if likely to be useful... */
|
||||
&& DBIc_is(imp_dbh, DBIcf_Executed) /* has not just called commit/rollback */
|
||||
/* && !DBIc_is(imp_dbh, DBIcf_ReadOnly) -- is not read only */
|
||||
&& (!PL_dirty || DBIc_DBISTATE(imp_dbh)->debug >= 3)
|
||||
) {
|
||||
warn("Issuing rollback() due to DESTROY without explicit disconnect() of %s handle %s",
|
||||
SvPV_nolen(*hv_fetch((HV*)SvRV(dbh), "ImplementorClass", 16, 1)),
|
||||
SvPV_nolen(*hv_fetch((HV*)SvRV(dbh), "Name", 4, 1))
|
||||
);
|
||||
}
|
||||
dbd_db_rollback(dbh, imp_dbh); /* ROLLBACK! */
|
||||
}
|
||||
dbd_db_disconnect(dbh, imp_dbh);
|
||||
DBIc_ACTIVE_off(imp_dbh); /* ensure it's off, regardless */
|
||||
}
|
||||
dbd_db_destroy(dbh, imp_dbh);
|
||||
}
|
||||
|
||||
|
||||
#ifdef dbd_take_imp_data
|
||||
|
||||
void
|
||||
take_imp_data(h)
|
||||
SV * h
|
||||
CODE:
|
||||
D_imp_xxh(h);
|
||||
/* dbd_take_imp_data() returns &sv_no (or other defined but false value)
|
||||
* to indicate "preparations complete, now call SUPER::take_imp_data" for me.
|
||||
* Anything else is returned to the caller via sv_2mortal(sv), typically that
|
||||
* would be &sv_undef for error or an SV holding the imp_data.
|
||||
*/
|
||||
SV *sv = dbd_take_imp_data(h, imp_xxh, NULL);
|
||||
if (SvOK(sv) && !SvTRUE(sv)) {
|
||||
SV *tmp = dbixst_bounce_method("DBD::~DRIVER~::db::SUPER::take_imp_data", items);
|
||||
SPAGAIN;
|
||||
ST(0) = tmp;
|
||||
} else {
|
||||
ST(0) = sv_2mortal(sv);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#ifdef dbd_db_data_sources
|
||||
|
||||
void
|
||||
data_sources(dbh, attr = Nullsv)
|
||||
SV *dbh
|
||||
SV *attr
|
||||
PPCODE:
|
||||
{
|
||||
D_imp_dbh(dbh);
|
||||
AV *av = dbd_db_data_sources(dbh, imp_dbh, attr);
|
||||
if (av) {
|
||||
int i;
|
||||
int n = AvFILL(av)+1;
|
||||
EXTEND(sp, n);
|
||||
for (i = 0; i < n; ++i) {
|
||||
PUSHs(AvARRAY(av)[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
# -- end of DBD::~DRIVER~::db
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# statement interface
|
||||
# ------------------------------------------------------------
|
||||
MODULE = DBD::~DRIVER~ PACKAGE = DBD::~DRIVER~::st
|
||||
|
||||
|
||||
bool
|
||||
_prepare(sth, statement, attribs=Nullsv)
|
||||
SV * sth
|
||||
SV * statement
|
||||
SV * attribs
|
||||
CODE:
|
||||
{
|
||||
D_imp_sth(sth);
|
||||
DBD_ATTRIBS_CHECK("_prepare", sth, attribs);
|
||||
#ifdef dbd_st_prepare_sv
|
||||
RETVAL = dbd_st_prepare_sv(sth, imp_sth, statement, attribs);
|
||||
#else
|
||||
RETVAL = dbd_st_prepare(sth, imp_sth, SvPV_nolen(statement), attribs);
|
||||
#endif
|
||||
}
|
||||
OUTPUT:
|
||||
RETVAL
|
||||
|
||||
|
||||
#ifdef dbd_st_rows
|
||||
|
||||
void
|
||||
rows(sth)
|
||||
SV * sth
|
||||
CODE:
|
||||
D_imp_sth(sth);
|
||||
XST_mIV(0, dbd_st_rows(sth, imp_sth));
|
||||
|
||||
#endif /* dbd_st_rows */
|
||||
|
||||
|
||||
#ifdef dbd_st_bind_col
|
||||
|
||||
bool
|
||||
bind_col(sth, col, ref, attribs=Nullsv)
|
||||
SV * sth
|
||||
SV * col
|
||||
SV * ref
|
||||
SV * attribs
|
||||
CODE:
|
||||
{
|
||||
IV sql_type = 0;
|
||||
D_imp_sth(sth);
|
||||
if (SvGMAGICAL(ref))
|
||||
mg_get(ref);
|
||||
if (attribs) {
|
||||
if (SvNIOK(attribs)) {
|
||||
sql_type = SvIV(attribs);
|
||||
attribs = Nullsv;
|
||||
}
|
||||
else {
|
||||
SV **svp;
|
||||
DBD_ATTRIBS_CHECK("bind_col", sth, attribs);
|
||||
/* XXX we should perhaps complain if TYPE is not SvNIOK */
|
||||
DBD_ATTRIB_GET_IV(attribs, "TYPE",4, svp, sql_type);
|
||||
}
|
||||
}
|
||||
switch(dbd_st_bind_col(sth, imp_sth, col, ref, sql_type, attribs)) {
|
||||
case 2: RETVAL = TRUE; /* job done completely */
|
||||
break;
|
||||
case 1: /* fallback to DBI default */
|
||||
RETVAL = DBIc_DBISTATE(imp_sth)->bind_col(sth, col, ref, attribs);
|
||||
break;
|
||||
default: RETVAL = FALSE; /* dbd_st_bind_col has called set_err */
|
||||
break;
|
||||
}
|
||||
}
|
||||
OUTPUT:
|
||||
RETVAL
|
||||
|
||||
#endif /* dbd_st_bind_col */
|
||||
|
||||
bool
|
||||
bind_param(sth, param, value, attribs=Nullsv)
|
||||
SV * sth
|
||||
SV * param
|
||||
SV * value
|
||||
SV * attribs
|
||||
CODE:
|
||||
{
|
||||
IV sql_type = 0;
|
||||
D_imp_sth(sth);
|
||||
if (SvGMAGICAL(value))
|
||||
mg_get(value);
|
||||
if (attribs) {
|
||||
if (SvNIOK(attribs)) {
|
||||
sql_type = SvIV(attribs);
|
||||
attribs = Nullsv;
|
||||
}
|
||||
else {
|
||||
SV **svp;
|
||||
DBD_ATTRIBS_CHECK("bind_param", sth, attribs);
|
||||
/* XXX we should perhaps complain if TYPE is not SvNIOK */
|
||||
DBD_ATTRIB_GET_IV(attribs, "TYPE",4, svp, sql_type);
|
||||
}
|
||||
}
|
||||
RETVAL = dbd_bind_ph(sth, imp_sth, param, value, sql_type, attribs, FALSE, 0);
|
||||
}
|
||||
OUTPUT:
|
||||
RETVAL
|
||||
|
||||
|
||||
bool
|
||||
bind_param_inout(sth, param, value_ref, maxlen, attribs=Nullsv)
|
||||
SV * sth
|
||||
SV * param
|
||||
SV * value_ref
|
||||
IV maxlen
|
||||
SV * attribs
|
||||
CODE:
|
||||
{
|
||||
IV sql_type = 0;
|
||||
D_imp_sth(sth);
|
||||
SV *value;
|
||||
if (!SvROK(value_ref) || SvTYPE(SvRV(value_ref)) > SVt_PVMG)
|
||||
croak("bind_param_inout needs a reference to a scalar value");
|
||||
value = SvRV(value_ref);
|
||||
if (SvREADONLY(value))
|
||||
croak("Modification of a read-only value attempted");
|
||||
if (SvGMAGICAL(value))
|
||||
mg_get(value);
|
||||
if (attribs) {
|
||||
if (SvNIOK(attribs)) {
|
||||
sql_type = SvIV(attribs);
|
||||
attribs = Nullsv;
|
||||
}
|
||||
else {
|
||||
SV **svp;
|
||||
DBD_ATTRIBS_CHECK("bind_param", sth, attribs);
|
||||
DBD_ATTRIB_GET_IV(attribs, "TYPE",4, svp, sql_type);
|
||||
}
|
||||
}
|
||||
RETVAL = dbd_bind_ph(sth, imp_sth, param, value, sql_type, attribs, TRUE, maxlen);
|
||||
}
|
||||
OUTPUT:
|
||||
RETVAL
|
||||
|
||||
|
||||
void
|
||||
execute(sth, ...)
|
||||
SV * sth
|
||||
CODE:
|
||||
D_imp_sth(sth);
|
||||
IV retval;
|
||||
if (items > 1) { /* need to bind params */
|
||||
if (!dbdxst_bind_params(sth, imp_sth, items, ax) ) {
|
||||
XSRETURN_UNDEF;
|
||||
}
|
||||
}
|
||||
/* XXX this code is duplicated in selectrow_arrayref above */
|
||||
DBIc_ROW_COUNT(imp_sth) = 0;
|
||||
retval = dbd_st_execute(sth, imp_sth); /* might be dbd_st_execute_iv via macro */
|
||||
/* remember that dbd_st_execute must return <= -2 for error */
|
||||
if (retval == 0) /* ok with no rows affected */
|
||||
XST_mPV(0, "0E0"); /* (true but zero) */
|
||||
else if (retval < -1) /* -1 == unknown number of rows */
|
||||
XST_mUNDEF(0); /* <= -2 means error */
|
||||
else
|
||||
XST_mIV(0, retval); /* typically 1, rowcount or -1 */
|
||||
|
||||
|
||||
#ifdef dbd_st_execute_for_fetch
|
||||
|
||||
void
|
||||
execute_for_fetch(sth, fetch_tuple_sub, tuple_status = Nullsv)
|
||||
SV * sth
|
||||
SV * fetch_tuple_sub
|
||||
SV * tuple_status
|
||||
CODE:
|
||||
{
|
||||
D_imp_sth(sth);
|
||||
SV *ret = dbd_st_execute_for_fetch(sth, imp_sth, fetch_tuple_sub, tuple_status);
|
||||
ST(0) = ret;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef dbd_st_last_insert_id
|
||||
|
||||
void
|
||||
last_insert_id(sth, catalog=&PL_sv_undef, schema=&PL_sv_undef, table=&PL_sv_undef, field=&PL_sv_undef, attr=Nullsv)
|
||||
SV * sth
|
||||
SV * catalog
|
||||
SV * schema
|
||||
SV * table
|
||||
SV * field
|
||||
SV * attr
|
||||
CODE:
|
||||
{
|
||||
D_imp_sth(sth);
|
||||
SV *ret = dbd_st_last_insert_id(sth, imp_sth, catalog, schema, table, field, attr);
|
||||
ST(0) = ret;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
void
|
||||
fetchrow_arrayref(sth)
|
||||
SV * sth
|
||||
ALIAS:
|
||||
fetch = 1
|
||||
CODE:
|
||||
D_imp_sth(sth);
|
||||
AV *av;
|
||||
PERL_UNUSED_VAR(ix);
|
||||
av = dbd_st_fetch(sth, imp_sth);
|
||||
ST(0) = (av) ? sv_2mortal(newRV((SV *)av)) : &PL_sv_undef;
|
||||
|
||||
|
||||
void
|
||||
fetchrow_array(sth)
|
||||
SV * sth
|
||||
ALIAS:
|
||||
fetchrow = 1
|
||||
PPCODE:
|
||||
D_imp_sth(sth);
|
||||
AV *av;
|
||||
av = dbd_st_fetch(sth, imp_sth);
|
||||
if (av) {
|
||||
int i;
|
||||
int num_fields = AvFILL(av)+1;
|
||||
EXTEND(sp, num_fields);
|
||||
for(i=0; i < num_fields; ++i) {
|
||||
PUSHs(AvARRAY(av)[i]);
|
||||
}
|
||||
PERL_UNUSED_VAR(ix);
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
fetchall_arrayref(sth, slice=&PL_sv_undef, batch_row_count=&PL_sv_undef)
|
||||
SV * sth
|
||||
SV * slice
|
||||
SV * batch_row_count
|
||||
CODE:
|
||||
if (SvOK(slice)) { /* fallback to perl implementation */
|
||||
SV *tmp = dbixst_bounce_method("DBD::~DRIVER~::st::SUPER::fetchall_arrayref", 3);
|
||||
SPAGAIN;
|
||||
ST(0) = tmp;
|
||||
}
|
||||
else {
|
||||
SV *tmp = dbdxst_fetchall_arrayref(sth, slice, batch_row_count);
|
||||
SPAGAIN;
|
||||
ST(0) = tmp;
|
||||
}
|
||||
|
||||
|
||||
bool
|
||||
finish(sth)
|
||||
SV * sth
|
||||
CODE:
|
||||
D_imp_sth(sth);
|
||||
D_imp_dbh_from_sth;
|
||||
if (!DBIc_ACTIVE(imp_sth)) {
|
||||
/* No active statement to finish */
|
||||
XSRETURN_YES;
|
||||
}
|
||||
if (!DBIc_ACTIVE(imp_dbh)) {
|
||||
/* Either an explicit disconnect() or global destruction */
|
||||
/* has disconnected us from the database. Finish is meaningless */
|
||||
DBIc_ACTIVE_off(imp_sth);
|
||||
XSRETURN_YES;
|
||||
}
|
||||
#ifdef dbd_st_finish3
|
||||
RETVAL = dbd_st_finish3(sth, imp_sth, 0);
|
||||
#else
|
||||
RETVAL = dbd_st_finish(sth, imp_sth);
|
||||
#endif
|
||||
OUTPUT:
|
||||
RETVAL
|
||||
|
||||
|
||||
void
|
||||
blob_read(sth, field, offset, len, destrv=Nullsv, destoffset=0)
|
||||
SV * sth
|
||||
int field
|
||||
long offset
|
||||
long len
|
||||
SV * destrv
|
||||
long destoffset
|
||||
CODE:
|
||||
{
|
||||
D_imp_sth(sth);
|
||||
if (!destrv)
|
||||
destrv = sv_2mortal(newRV(sv_2mortal(newSV(0))));
|
||||
if (dbd_st_blob_read(sth, imp_sth, field, offset, len, destrv, destoffset))
|
||||
ST(0) = SvRV(destrv);
|
||||
else ST(0) = &PL_sv_undef;
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
STORE(sth, keysv, valuesv)
|
||||
SV * sth
|
||||
SV * keysv
|
||||
SV * valuesv
|
||||
CODE:
|
||||
D_imp_sth(sth);
|
||||
if (SvGMAGICAL(valuesv))
|
||||
mg_get(valuesv);
|
||||
ST(0) = &PL_sv_yes;
|
||||
if (!dbd_st_STORE_attrib(sth, imp_sth, keysv, valuesv))
|
||||
if (!DBIc_DBISTATE(imp_sth)->set_attr(sth, keysv, valuesv))
|
||||
ST(0) = &PL_sv_no;
|
||||
|
||||
|
||||
# FETCH renamed and ALIAS'd to avoid case clash on VMS :-(
|
||||
void
|
||||
FETCH_attrib(sth, keysv)
|
||||
SV * sth
|
||||
SV * keysv
|
||||
ALIAS:
|
||||
FETCH = 1
|
||||
CODE:
|
||||
D_imp_sth(sth);
|
||||
SV *valuesv;
|
||||
PERL_UNUSED_VAR(ix);
|
||||
valuesv = dbd_st_FETCH_attrib(sth, imp_sth, keysv);
|
||||
if (!valuesv)
|
||||
valuesv = DBIc_DBISTATE(imp_sth)->get_attr(sth, keysv);
|
||||
ST(0) = valuesv; /* dbd_st_FETCH_attrib did sv_2mortal */
|
||||
|
||||
|
||||
void
|
||||
DESTROY(sth)
|
||||
SV * sth
|
||||
PPCODE:
|
||||
/* keep in sync with default DESTROY in DBI.xs */
|
||||
D_imp_sth(sth);
|
||||
ST(0) = &PL_sv_yes;
|
||||
if (!DBIc_IMPSET(imp_sth)) { /* was never fully set up */
|
||||
STRLEN lna;
|
||||
if (DBIc_WARN(imp_sth) && !PL_dirty && DBIc_DBISTATE(imp_sth)->debug >= 2)
|
||||
PerlIO_printf(DBIc_LOGPIO(imp_sth),
|
||||
" DESTROY for %s ignored - handle not initialised\n",
|
||||
SvPV(sth,lna));
|
||||
}
|
||||
else {
|
||||
if (DBIc_IADESTROY(imp_sth)) { /* wants ineffective destroy */
|
||||
DBIc_ACTIVE_off(imp_sth);
|
||||
if (DBIc_DBISTATE(imp_sth)->debug)
|
||||
PerlIO_printf(DBIc_LOGPIO(imp_sth), " DESTROY %s skipped due to InactiveDestroy\n", SvPV_nolen(sth));
|
||||
}
|
||||
if (DBIc_ACTIVE(imp_sth)) {
|
||||
D_imp_dbh_from_sth;
|
||||
if (!PL_dirty && DBIc_ACTIVE(imp_dbh)) {
|
||||
#ifdef dbd_st_finish3
|
||||
dbd_st_finish3(sth, imp_sth, 1);
|
||||
#else
|
||||
dbd_st_finish(sth, imp_sth);
|
||||
#endif
|
||||
}
|
||||
else {
|
||||
DBIc_ACTIVE_off(imp_sth);
|
||||
}
|
||||
}
|
||||
dbd_st_destroy(sth, imp_sth);
|
||||
}
|
||||
|
||||
# end of ~DRIVER~.xst
|
||||
# vim:ts=8:sw=4:et

@@ -0,0 +1,122 @@

/*
|
||||
# $Id$
|
||||
# Copyright (c) 2002 Tim Bunce Ireland
|
||||
#
|
||||
# You may distribute under the terms of either the GNU General Public
|
||||
# License or the Artistic License, as specified in the Perl README file.
|
||||
*/
|
||||
|
||||
|
||||
/* This is really just a workaround for SUPER:: not working right for XS code.
|
||||
* It would be better if we set up perl's context so SUPER:: did the right thing
|
||||
* (borrowing the relevant magic from pp_entersub in perl pp_hot.c).
|
||||
* Then we could just use call_method("SUPER::foo") instead.
|
||||
* XXX remember to call SPAGAIN in the calling code after calling this!
|
||||
*/
|
||||
static SV *
|
||||
dbixst_bounce_method(char *methname, int params)
|
||||
{
|
||||
dTHX;
|
||||
/* XXX this 'magic' undoes the dMARK embedded in the dXSARGS of our caller */
|
||||
/* so that the dXSARGS below can set things up as they were for our caller */
|
||||
void *xxx = PL_markstack_ptr++;
|
||||
dXSARGS; /* declares sp, ax, mark, items */
|
||||
int i;
|
||||
SV *sv;
|
||||
int debug = 0;
|
||||
D_imp_xxh(ST(0));
|
||||
if (debug >= 3) {
|
||||
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
|
||||
" -> %s (trampoline call with %d (%ld) params)\n", methname, params, (long)items);
|
||||
PERL_UNUSED_VAR(xxx);
|
||||
}
|
||||
EXTEND(SP, params);
|
||||
PUSHMARK(SP);
|
||||
for (i=0; i < params; ++i) {
|
||||
sv = (i >= items) ? &PL_sv_undef : ST(i);
|
||||
PUSHs(sv);
|
||||
}
|
||||
PUTBACK;
|
||||
i = call_method(methname, G_SCALAR);
|
||||
SPAGAIN;
|
||||
sv = (i) ? POPs : &PL_sv_undef;
|
||||
PUTBACK;
|
||||
if (debug >= 3)
|
||||
PerlIO_printf(DBIc_LOGPIO(imp_xxh),
|
||||
" <- %s= %s (trampoline call return)\n", methname, neatsvpv(sv,0));
|
||||
return sv;
|
||||
}
|
||||
|
||||
|
||||
static int
|
||||
dbdxst_bind_params(SV *sth, imp_sth_t *imp_sth, I32 items, I32 ax)
|
||||
{
|
||||
/* Handle binding supplied values to placeholders. */
|
||||
/* items = one greater than the number of params */
|
||||
/* ax = ax from calling sub, maybe adjusted to match items */
|
||||
dTHX;
|
||||
int i;
|
||||
SV *idx;
|
||||
if (items-1 != DBIc_NUM_PARAMS(imp_sth)
|
||||
&& DBIc_NUM_PARAMS(imp_sth) != DBIc_NUM_PARAMS_AT_EXECUTE
|
||||
) {
|
||||
char errmsg[99];
|
||||
/* clear any previous ParamValues before error is generated */
|
||||
SV **svp = hv_fetch((HV*)DBIc_MY_H(imp_sth),"ParamValues",11,FALSE);
|
||||
if (svp && SvROK(*svp) && SvTYPE(SvRV(*svp)) == SVt_PVHV) {
|
||||
HV *hv = (HV*)SvRV(*svp);
|
||||
hv_clear(hv);
|
||||
}
|
||||
sprintf(errmsg,"called with %d bind variables when %d are needed",
|
||||
(int)items-1, DBIc_NUM_PARAMS(imp_sth));
|
||||
DBIh_SET_ERR_CHAR(sth, (imp_xxh_t*)imp_sth, "-1", -1, errmsg, Nullch, Nullch);
|
||||
return 0;
|
||||
}
|
||||
idx = sv_2mortal(newSViv(0));
|
||||
for(i=1; i < items ; ++i) {
|
||||
SV* value = ST(i);
|
||||
if (SvGMAGICAL(value))
|
||||
mg_get(value); /* trigger magic to FETCH the value */
|
||||
sv_setiv(idx, i);
|
||||
if (!dbd_bind_ph(sth, imp_sth, idx, value, 0, Nullsv, FALSE, 0)) {
|
||||
return 0; /* dbd_bind_ph already registered error */
|
||||
}
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
#ifndef dbd_fetchall_arrayref
|
||||
static SV *
|
||||
dbdxst_fetchall_arrayref(SV *sth, SV *slice, SV *batch_row_count)
|
||||
{
|
||||
dTHX;
|
||||
D_imp_sth(sth);
|
||||
SV *rows_rvav;
|
||||
if (SvOK(slice)) { /* should never get here */
|
||||
char errmsg[99];
|
||||
sprintf(errmsg,"slice param not supported by XS version of fetchall_arrayref");
|
||||
DBIh_SET_ERR_CHAR(sth, (imp_xxh_t*)imp_sth, "-1", -1, errmsg, Nullch, Nullch);
|
||||
return &PL_sv_undef;
|
||||
}
|
||||
else {
|
||||
IV maxrows = SvOK(batch_row_count) ? SvIV(batch_row_count) : -1;
|
||||
AV *fetched_av;
|
||||
AV *rows_av = newAV();
|
||||
if ( !DBIc_ACTIVE(imp_sth) && maxrows>0 ) {
|
||||
/* to simplify application logic we return undef without an error */
|
||||
/* if we've fetched all the rows and called with a batch_row_count */
|
||||
return &PL_sv_undef;
|
||||
}
|
||||
av_extend(rows_av, (maxrows>0) ? maxrows : 31);
|
||||
while ( (maxrows < 0 || maxrows-- > 0)
|
||||
&& (fetched_av = dbd_st_fetch(sth, imp_sth))
|
||||
) {
|
||||
AV *copy_row_av = av_make(AvFILL(fetched_av)+1, AvARRAY(fetched_av));
|
||||
av_push(rows_av, newRV_noinc((SV*)copy_row_av));
|
||||
}
|
||||
rows_rvav = sv_2mortal(newRV_noinc((SV *)rows_av));
|
||||
}
|
||||
return rows_rvav;
|
||||
}
|
||||
#endif
|
||||

@@ -0,0 +1,46 @@

BEFORE BUILDING, TESTING AND INSTALLING this you will need to:

    Build, test and install a recent version of Perl 5.
    It is very important to test it and actually install it!
    (You can use "Configure -Dprefix=..." to build a private copy.)

BUILDING

    perl Makefile.PL
    make
    make test
    make test TEST_VERBOSE=1   (if any of the t/* tests fail)
    make install               (if the tests look okay)

The perl you use to execute Makefile.PL should be the first one in your PATH.
If you want to use a particular installed perl, modify your PATH to match.

IF YOU HAVE PROBLEMS

---
If you get an error like "gcc: command not found" or "cc: command not found",
you need to either install a compiler, or you may be able to install a
precompiled binary of DBI using a package manager (e.g. ppm for ActiveState,
Synaptic for Ubuntu, port for FreeBSD, etc.).

---
If you get compiler errors referring to Perl's own header files
(.../CORE/...h), or the compiler complains about bad options etc., then
there is something wrong with your perl installation. If the compiler complains
of missing files (.../perl.h: error: sys/types.h: No such file) then you may
need to install extra packages for your operating system.

Generally it's best to use a Perl that was built on the system you are trying
to use, and it's also important to use the same compiler that was used to build
the Perl you are using.

If you installed Perl using a binary distribution, such as ActiveState Perl,
or if Perl came installed with the operating system you use, such as Debian or
Ubuntu, then you may be able to install a precompiled binary of DBI using a
package manager. Check the package manager for your distribution of Perl (e.g.
ppm for ActiveState) or for your operating system (e.g. Synaptic for Ubuntu).

---
If you get compiler warnings like "value computed is not used" and
"unused variable" you can ignore them.
|
||||
|
|
@ -0,0 +1,412 @@
|
|||
DBI is Copyright (c) 1994-2015 by Tim Bunce and others.
|
||||
See LICENSE included with this distribution. All rights reserved.
|
||||
|
||||
This is free software; you can redistribute it and/or modify it under the
|
||||
same terms as the Perl5 (v5.0.0 ~ v5.20.0) programming language system
|
||||
itself: under the terms of either:
|
||||
|
||||
a) the "Artistic License 1.0" as published by The Perl Foundation
|
||||
http://www.perlfoundation.org/artistic_license_1_0
|
||||
|
||||
b) the GNU General Public License as published by the Free Software Foundation;
|
||||
either version 1 http://www.gnu.org/licenses/gpl-1.0.html
|
||||
or (at your option) any later version
|
||||
|
||||
PLEASE NOTE: It is the current maintainers intention to keep the dual
|
||||
licensing intact. Until this notice is removed, releases will continue to
|
||||
be available under both the standard GPL and the less restrictive Artistic
|
||||
licenses.
|
||||
|
||||
Verbatim copies of both licenses are included below:
|
||||
|
||||
|
||||
|
||||
--- The Artistic License 1.0 ---
|
||||
|
||||
The "Artistic License"
|
||||
|
||||
Preamble
|
||||
|
||||
The intent of this document is to state the conditions under which a
|
||||
Package may be copied, such that the Copyright Holder maintains some
|
||||
semblance of artistic control over the development of the package,
|
||||
while giving the users of the package the right to use and distribute
|
||||
the Package in a more-or-less customary fashion, plus the right to make
|
||||
reasonable modifications.
|
||||
|
||||
Definitions:
|
||||
|
||||
"Package" refers to the collection of files distributed by the
|
||||
Copyright Holder, and derivatives of that collection of files
|
||||
created through textual modification.
|
||||
|
||||
"Standard Version" refers to such a Package if it has not been
|
||||
modified, or has been modified in accordance with the wishes
|
||||
of the Copyright Holder as specified below.
|
||||
|
||||
"Copyright Holder" is whoever is named in the copyright or
|
||||
copyrights for the package.
|
||||
|
||||
"You" is you, if you're thinking about copying or distributing
|
||||
this Package.
|
||||
|
||||
"Reasonable copying fee" is whatever you can justify on the
|
||||
basis of media cost, duplication charges, time of people involved,
|
||||
and so on. (You will not be required to justify it to the
|
||||
Copyright Holder, but only to the computing community at large
|
||||
as a market that must bear the fee.)
|
||||
|
||||
"Freely Available" means that no fee is charged for the item
|
||||
itself, though there may be fees involved in handling the item.
|
||||
It also means that recipients of the item may redistribute it
|
||||
under the same conditions they received it.
|
||||
|
||||
1. You may make and give away verbatim copies of the source form of the
|
||||
Standard Version of this Package without restriction, provided that you
|
||||
duplicate all of the original copyright notices and associated disclaimers.
|
||||
|
||||
2. You may apply bug fixes, portability fixes and other modifications
|
||||
derived from the Public Domain or from the Copyright Holder. A Package
|
||||
modified in such a way shall still be considered the Standard Version.
|
||||
|
||||
3. You may otherwise modify your copy of this Package in any way, provided
|
||||
that you insert a prominent notice in each changed file stating how and
|
||||
when you changed that file, and provided that you do at least ONE of the
|
||||
following:
|
||||
|
||||
a) place your modifications in the Public Domain or otherwise make them
|
||||
Freely Available, such as by posting said modifications to Usenet or
|
||||
an equivalent medium, or placing the modifications on a major archive
|
||||
site such as uunet.uu.net, or by allowing the Copyright Holder to include
|
||||
your modifications in the Standard Version of the Package.
|
||||
|
||||
b) use the modified Package only within your corporation or organization.
|
||||
|
||||
c) rename any non-standard executables so the names do not conflict
|
||||
with standard executables, which must also be provided, and provide
|
||||
a separate manual page for each non-standard executable that clearly
|
||||
documents how it differs from the Standard Version.
|
||||
|
||||
d) make other distribution arrangements with the Copyright Holder.
|
||||
|
||||
4. You may distribute the programs of this Package in object code or
|
||||
executable form, provided that you do at least ONE of the following:
|
||||
|
||||
a) distribute a Standard Version of the executables and library files,
|
||||
together with instructions (in the manual page or equivalent) on where
|
||||
to get the Standard Version.
|
||||
|
||||
b) accompany the distribution with the machine-readable source of
|
||||
the Package with your modifications.
|
||||
|
||||
c) give non-standard executables non-standard names, and clearly
|
||||
document the differences in manual pages (or equivalent), together
|
||||
with instructions on where to get the Standard Version.
|
||||
|
||||
d) make other distribution arrangements with the Copyright Holder.
|
||||
|
||||
5. You may charge a reasonable copying fee for any distribution of this
|
||||
Package. You may charge any fee you choose for support of this
|
||||
Package. You may not charge a fee for this Package itself. However,
|
||||
you may distribute this Package in aggregate with other (possibly
|
||||
commercial) programs as part of a larger (possibly commercial) software
|
||||
distribution provided that you do not advertise this Package as a
|
||||
product of your own. You may embed this Package's interpreter within
|
||||
an executable of yours (by linking); this shall be construed as a mere
|
||||
form of aggregation, provided that the complete Standard Version of the
|
||||
interpreter is so embedded.
|
||||
|
||||
6. The scripts and library files supplied as input to or produced as
|
||||
output from the programs of this Package do not automatically fall
|
||||
under the copyright of this Package, but belong to whoever generated
|
||||
them, and may be sold commercially, and may be aggregated with this
|
||||
Package. If such scripts or library files are aggregated with this
|
||||
Package via the so-called "undump" or "unexec" methods of producing a
|
||||
binary executable image, then distribution of such an image shall
|
||||
neither be construed as a distribution of this Package nor shall it
|
||||
fall under the restrictions of Paragraphs 3 and 4, provided that you do
|
||||
not represent such an executable image as a Standard Version of this
|
||||
Package.
|
||||
|
||||
7. C subroutines (or comparably compiled subroutines in other
|
||||
languages) supplied by you and linked into this Package in order to
|
||||
emulate subroutines and variables of the language defined by this
|
||||
Package shall not be considered part of this Package, but are the
|
||||
equivalent of input as in Paragraph 6, provided these subroutines do
|
||||
not change the language in any way that would cause it to fail the
|
||||
regression tests for the language.
|
||||
|
||||
8. Aggregation of this Package with a commercial distribution is always
|
||||
permitted provided that the use of this Package is embedded; that is,
|
||||
when no overt attempt is made to make this Package's interfaces visible
|
||||
to the end user of the commercial distribution. Such use shall not be
|
||||
construed as a distribution of this Package.
|
||||
|
||||
9. The name of the Copyright Holder may not be used to endorse or promote
|
||||
products derived from this software without specific prior written permission.
|
||||
|
||||
10. THIS PACKAGE IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR
|
||||
IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
|
||||
|
||||
--- end of The Artistic License 1.0 ---
|
||||
|
||||
|
||||
|
||||
|
||||
--- The GNU General Public License, Version 1, February 1989 ---
|
||||
|
||||
GNU GENERAL PUBLIC LICENSE
|
||||
Version 1, February 1989
|
||||
|
||||
Copyright (C) 1989 Free Software Foundation, Inc.
|
||||
51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The license agreements of most software companies try to keep users
|
||||
at the mercy of those companies. By contrast, our General Public
|
||||
License is intended to guarantee your freedom to share and change free
|
||||
software--to make sure the software is free for all its users. The
|
||||
General Public License applies to the Free Software Foundation's
|
||||
software and to any other program whose authors commit to using it.
|
||||
You can use it for your programs, too.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Specifically, the General Public License is designed to make
|
||||
sure that you have the freedom to give away or sell copies of free
|
||||
software, that you receive source code or can get it if you want it,
|
||||
that you can change the software or use pieces of it in new free
|
||||
programs; and that you know you can do these things.
|
||||
|
||||
To protect your rights, we need to make restrictions that forbid
|
||||
anyone to deny you these rights or to ask you to surrender the rights.
|
||||
These restrictions translate to certain responsibilities for you if you
|
||||
distribute copies of the software, or if you modify it.
|
||||
|
||||
For example, if you distribute copies of a such a program, whether
|
||||
gratis or for a fee, you must give the recipients all the rights that
|
||||
you have. You must make sure that they, too, receive or can get the
|
||||
source code. And you must tell them their rights.
|
||||
|
||||
We protect your rights with two steps: (1) copyright the software, and
|
||||
(2) offer you this license which gives you legal permission to copy,
|
||||
distribute and/or modify the software.
|
||||
|
||||
Also, for each author's protection and ours, we want to make certain
|
||||
that everyone understands that there is no warranty for this free
|
||||
software. If the software is modified by someone else and passed on, we
|
||||
want its recipients to know that what they have is not the original, so
|
||||
that any problems introduced by others will not reflect on the original
|
||||
authors' reputations.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
GNU GENERAL PUBLIC LICENSE
|
||||
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
|
||||
|
||||
0. This License Agreement applies to any program or other work which
|
||||
contains a notice placed by the copyright holder saying it may be
|
||||
distributed under the terms of this General Public License. The
|
||||
"Program", below, refers to any such program or work, and a "work based
|
||||
on the Program" means either the Program or any work containing the
|
||||
Program or a portion of it, either verbatim or with modifications. Each
|
||||
licensee is addressed as "you".
|
||||
|
||||
1. You may copy and distribute verbatim copies of the Program's source
|
||||
code as you receive it, in any medium, provided that you conspicuously and
|
||||
appropriately publish on each copy an appropriate copyright notice and
|
||||
disclaimer of warranty; keep intact all the notices that refer to this
|
||||
General Public License and to the absence of any warranty; and give any
|
||||
other recipients of the Program a copy of this General Public License
|
||||
along with the Program. You may charge a fee for the physical act of
|
||||
transferring a copy.
|
||||
|
||||
2. You may modify your copy or copies of the Program or any portion of
|
||||
it, and copy and distribute such modifications under the terms of Paragraph
|
||||
1 above, provided that you also do the following:
|
||||
|
||||
a) cause the modified files to carry prominent notices stating that
|
||||
you changed the files and the date of any change; and
|
||||
|
||||
b) cause the whole of any work that you distribute or publish, that
|
||||
in whole or in part contains the Program or any part thereof, either
|
||||
with or without modifications, to be licensed at no charge to all
|
||||
third parties under the terms of this General Public License (except
|
||||
that you may choose to grant warranty protection to some or all
|
||||
third parties, at your option).
|
||||
|
||||
c) If the modified program normally reads commands interactively when
|
||||
run, you must cause it, when started running for such interactive use
|
||||
in the simplest and most usual way, to print or display an
|
||||
announcement including an appropriate copyright notice and a notice
|
||||
that there is no warranty (or else, saying that you provide a
|
||||
warranty) and that users may redistribute the program under these
|
||||
conditions, and telling the user how to view a copy of this General
|
||||
Public License.
|
||||
|
||||
d) You may charge a fee for the physical act of transferring a
|
||||
copy, and you may at your option offer warranty protection in
|
||||
exchange for a fee.
|
||||
|
||||
Mere aggregation of another independent work with the Program (or its
|
||||
derivative) on a volume of a storage or distribution medium does not bring
|
||||
the other work under the scope of these terms.
|
||||
|
||||
3. You may copy and distribute the Program (or a portion or derivative of
|
||||
it, under Paragraph 2) in object code or executable form under the terms of
|
||||
Paragraphs 1 and 2 above provided that you also do one of the following:
|
||||
|
||||
a) accompany it with the complete corresponding machine-readable
|
||||
source code, which must be distributed under the terms of
|
||||
Paragraphs 1 and 2 above; or,
|
||||
|
||||
b) accompany it with a written offer, valid for at least three
|
||||
years, to give any third party free (except for a nominal charge
|
||||
for the cost of distribution) a complete machine-readable copy of the
|
||||
corresponding source code, to be distributed under the terms of
|
||||
Paragraphs 1 and 2 above; or,
|
||||
|
||||
c) accompany it with the information you received as to where the
|
||||
corresponding source code may be obtained. (This alternative is
|
||||
allowed only for noncommercial distribution and only if you
|
||||
received the program in object code or executable form alone.)
|
||||
|
||||
Source code for a work means the preferred form of the work for making
|
||||
modifications to it. For an executable file, complete source code means
|
||||
all the source code for all modules it contains; but, as a special
|
||||
exception, it need not include source code for modules which are standard
|
||||
libraries that accompany the operating system on which the executable
|
||||
file runs, or for standard header files or definitions files that
|
||||
accompany that operating system.
|
||||
|
||||
4. You may not copy, modify, sublicense, distribute or transfer the
|
||||
Program except as expressly provided under this General Public License.
|
||||
Any attempt otherwise to copy, modify, sublicense, distribute or transfer
|
||||
the Program is void, and will automatically terminate your rights to use
|
||||
the Program under this License. However, parties who have received
|
||||
copies, or rights to use copies, from you under this General Public
|
||||
License will not have their licenses terminated so long as such parties
|
||||
remain in full compliance.
|
||||
|
||||
5. By copying, distributing or modifying the Program (or any work based
|
||||
on the Program) you indicate your acceptance of this license to do so,
|
||||
and all its terms and conditions.
|
||||
|
||||
6. Each time you redistribute the Program (or any work based on the
|
||||
Program), the recipient automatically receives a license from the original
|
||||
licensor to copy, distribute or modify the Program subject to these
|
||||
terms and conditions. You may not impose any further restrictions on the
|
||||
recipients' exercise of the rights granted herein.
|
||||
|
||||
7. The Free Software Foundation may publish revised and/or new versions
|
||||
of the General Public License from time to time. Such new versions will
|
||||
be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the Program
|
||||
specifies a version number of the license which applies to it and "any
|
||||
later version", you have the option of following the terms and conditions
|
||||
either of that version or of any later version published by the Free
|
||||
Software Foundation. If the Program does not specify a version number of
|
||||
the license, you may choose any version ever published by the Free Software
|
||||
Foundation.
|
||||
|
||||
8. If you wish to incorporate parts of the Program into other free
|
||||
programs whose distribution conditions are different, write to the author
|
||||
to ask for permission. For software which is copyrighted by the Free
|
||||
Software Foundation, write to the Free Software Foundation; we sometimes
|
||||
make exceptions for this. Our decision will be guided by the two goals
|
||||
of preserving the free status of all derivatives of our free software and
|
||||
of promoting the sharing and reuse of software generally.
|
||||
|
||||
NO WARRANTY
|
||||
|
||||
9. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
|
||||
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
|
||||
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
|
||||
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
|
||||
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
|
||||
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
|
||||
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
|
||||
REPAIR OR CORRECTION.
|
||||
|
||||
10. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
|
||||
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
|
||||
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
|
||||
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
|
||||
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
|
||||
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
|
||||
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGES.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
Appendix: How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to humanity, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these
|
||||
terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest to
|
||||
attach them to the start of each source file to most effectively convey
|
||||
the exclusion of warranty; and each file should have at least the
|
||||
"copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) 19yy <name of author>
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 1, or (at your option)
|
||||
any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
|
||||
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If the program is interactive, make it output a short notice like this
|
||||
when it starts in an interactive mode:
|
||||
|
||||
Gnomovision version 69, Copyright (C) 19xx name of author
|
||||
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||
This is free software, and you are welcome to redistribute it
|
||||
under certain conditions; type `show c' for details.
|
||||
|
||||
The hypothetical commands `show w' and `show c' should show the
|
||||
appropriate parts of the General Public License. Of course, the
|
||||
commands you use may be called something other than `show w' and `show
|
||||
c'; they could even be mouse-clicks or menu items--whatever suits your
|
||||
program.
|
||||
|
||||
You should also get your employer (if you work as a programmer) or your
|
||||
school, if any, to sign a "copyright disclaimer" for the program, if
|
||||
necessary. Here a sample; alter the names:
|
||||
|
||||
Yoyodyne, Inc., hereby disclaims all copyright interest in the
|
||||
program `Gnomovision' (a program to direct compilers to make passes
|
||||
at assemblers) written by James Hacker.
|
||||
|
||||
<signature of Ty Coon>, 1 April 1989
|
||||
Ty Coon, President of Vice
|
||||
|
||||
That's all there is to it!
|
||||
|
||||
--- end of The GNU General Public License, Version 1, February 1989 ---
|
||||
|
||||
|
|
@ -0,0 +1,126 @@
|
|||
Changes History of significant changes to the DBI
|
||||
DBI.pm The Database Interface Module Perl code
|
||||
DBI.xs The Database Interface Module XS code
|
||||
DBIXS.h The DBI XS public interface for Drivers (DBD::...)
|
||||
Driver.xst Template driver xs file
|
||||
Driver_xst.h Template driver xs support code
|
||||
INSTALL
|
||||
LICENSE
|
||||
MANIFEST
|
||||
Makefile.PL The Makefile generator
|
||||
Perl.xs Test harness (currently) for Driver.xst
|
||||
README.md
|
||||
dbd_xsh.h Prototypes for standard Driver.xst interface
|
||||
dbi_sql.h Definitions based on SQL CLI / ODBC (#inc'd by DBIXS.h)
|
||||
dbipport.h Perl portability macros (from Devel::PPort)
|
||||
dbilogstrip.PL Utility to normalise DBI logs so they can be compared with diff
|
||||
dbiprof.PL
|
||||
dbiproxy.PL Frontend for DBI::ProxyServer
|
||||
dbivport.h DBI version portability macros (for drivers to copy)
|
||||
dbixs_rev.h Defines DBIXS_REVISION macro holding DBIXS.h subversion revision number
|
||||
dbixs_rev.pl Utility to write dbixs_rev.h
|
||||
ex/perl_dbi_nulls_test.pl A test script for forms of IS NULL qualification in SQL
|
||||
ex/profile.pl A test script for DBI::Profile
|
||||
ex/corogofer.pl A test script for DBD::Gofer::Transport::corostream
|
||||
lib/Bundle/DBI.pm A bundle for automatic installation via CPAN.
|
||||
lib/DBD/DBM.pm A driver for DBM files (uses DBD::File)
|
||||
lib/DBD/ExampleP.pm A very simple example Driver module
|
||||
lib/DBD/File.pm A driver base class for simple drivers
|
||||
lib/DBD/File/Developers.pod Developer documentation for DBD::File
|
||||
lib/DBD/File/Roadmap.pod Roadmap for DBD::File and other Pure Perl DBD's
|
||||
lib/DBD/File/HowTo.pod Guide to write a DBD::File based DBI driver
|
||||
lib/DBD/Gofer.pm DBD::Gofer 'stateless proxy' driver
|
||||
lib/DBD/Gofer/Policy/Base.pm
|
||||
lib/DBD/Gofer/Policy/pedantic.pm Safest and most transparent, but also slowest
|
||||
lib/DBD/Gofer/Policy/classic.pm Reasonable policy for typical usage
|
||||
lib/DBD/Gofer/Policy/rush.pm Raw speed, fewest round trips, least transparent
|
||||
lib/DBD/Gofer/Transport/Base.pm Base class for DBD::Gofer driver transport classes
|
||||
lib/DBD/Gofer/Transport/corostream.pm Async Gofer transport using Coro and AnyEvent
|
||||
lib/DBD/Gofer/Transport/null.pm DBD::Gofer transport that executes in same process (for testing)
|
||||
lib/DBD/Gofer/Transport/pipeone.pm DBD::Gofer transport to new subprocess for each request
|
||||
lib/DBD/Gofer/Transport/stream.pm DBD::Gofer transport for ssh etc
|
||||
lib/DBD/Mem.pm A pure-perl in-memory driver using DBI::DBD::SqlEngine
|
||||
lib/DBD/NullP.pm An empty example Driver module
|
||||
lib/DBD/Proxy.pm Proxy driver
|
||||
lib/DBD/Sponge.pm A driver for fake cursors (precached data)
|
||||
lib/DBI/Const/GetInfo/ANSI.pm GetInfo data based on ANSI standard
|
||||
lib/DBI/Const/GetInfo/ODBC.pm GetInfo data based on ODBC standard
|
||||
lib/DBI/Const/GetInfoReturn.pm GetInfo return values plus tools based on standards
|
||||
lib/DBI/Const/GetInfoType.pm GetInfo type code data based on standards
|
||||
lib/DBI/DBD.pm Some basic help for people writing DBI drivers
|
||||
lib/DBI/DBD/Metadata.pm Metadata tools for people writing DBI drivers
|
||||
lib/DBI/DBD/SqlEngine.pm SQL Engine for drivers without one of their own
|
||||
lib/DBI/DBD/SqlEngine/Developers.pod DBI::DBD::SqlEngine API Documentation
|
||||
lib/DBI/DBD/SqlEngine/HowTo.pod HowTo ... write a DBI::DBD::SqlEngine based driver
|
||||
lib/DBI/Gofer/Execute.pm Execution logic for DBD::Gofer server
|
||||
lib/DBI/Gofer/Request.pm Request object from DBD::Gofer
|
||||
lib/DBI/Gofer/Response.pm Response object for DBD::Gofer
|
||||
lib/DBI/Gofer/Serializer/Base.pm
|
||||
lib/DBI/Gofer/Serializer/DataDumper.pm
|
||||
lib/DBI/Gofer/Serializer/Storable.pm
|
||||
lib/DBI/Gofer/Transport/Base.pm Base class for DBD::Gofer server transport classes
|
||||
lib/DBI/Gofer/Transport/pipeone.pm DBD::Gofer transport for single requests
|
||||
lib/DBI/Gofer/Transport/stream.pm DBI::Gofer transport for ssh etc
|
||||
lib/DBI/Profile.pm Manage DBI usage profile data
|
||||
lib/DBI/ProfileData.pm
|
||||
lib/DBI/ProfileDumper.pm
|
||||
lib/DBI/ProfileDumper/Apache.pm
|
||||
lib/DBI/ProfileSubs.pm
|
||||
lib/DBI/ProxyServer.pm The proxy driver's server
|
||||
lib/DBI/PurePerl.pm A DBI.xs emulation in Perl
|
||||
lib/DBI/SQL/Nano.pm A 'smaller than micro' SQL parser
|
||||
lib/DBI/Util/_accessor.pm A very cut-down version of Class::Accessor::Fast
|
||||
lib/DBI/Util/CacheMemory.pm A very cut-down version of Cache::Memory
|
||||
lib/DBI/W32ODBC.pm An experimental DBI emulation layer for Win32::ODBC
|
||||
lib/Win32/DBIODBC.pm An experimental Win32::ODBC emulation layer for DBI
|
||||
t/01basics.t
|
||||
t/02dbidrv.t
|
||||
t/03handle.t
|
||||
t/04mods.t
|
||||
t/05concathash.t
|
||||
t/06attrs.t
|
||||
t/07kids.t
|
||||
t/08keeperr.t
|
||||
t/09trace.t
|
||||
t/10examp.t
|
||||
t/11fetch.t
|
||||
t/12quote.t
|
||||
t/13taint.t
|
||||
t/14utf8.t
|
||||
t/15array.t
|
||||
t/16destroy.t
|
||||
t/17handle_error.t
|
||||
t/19fhtrace.t
|
||||
t/20meta.t
|
||||
t/30subclass.t
|
||||
t/31methcache.t Test caching of inner methods
|
||||
t/35thrclone.t
|
||||
t/40profile.t
|
||||
t/41prof_dump.t
|
||||
t/42prof_data.t
|
||||
t/43prof_env.t
|
||||
t/48dbi_dbd_sqlengine.t Tests for DBI::DBD::SqlEngine
|
||||
t/49dbd_file.t DBD::File API and very basic tests
|
||||
t/50dbm_simple.t simple DBD::DBM tests
|
||||
t/51dbm_file.t extended DBD::File tests (through DBD::DBM)
|
||||
t/52dbm_complex.t Complex DBD::DBM tests with SQL::Statement
|
||||
t/53sqlengine_adv.t
|
||||
t/54_dbd_mem.t
|
||||
t/60preparse.t
|
||||
t/65transact.t
|
||||
t/70callbacks.t
|
||||
t/72childhandles.t
|
||||
t/73cachedkids.t
|
||||
t/80proxy.t
|
||||
t/85gofer.t
|
||||
t/86gofer_fail.t
|
||||
t/87gofer_cache.t
|
||||
t/90sql_type_cast.t
|
||||
t/91_store_warning.t
|
||||
t/lib.pl Utility functions for test scripts
|
||||
t/pod.t
|
||||
t/pod-coverage.t
|
||||
test.pl Assorted informal tests, including tests for memory leaks
|
||||
typemap
|
||||
META.yml Module YAML meta-data (added by MakeMaker)
|
||||
META.json Module JSON meta-data (added by MakeMaker)
|
|
@ -0,0 +1,71 @@
|
|||
{
|
||||
"abstract" : "Database independent interface for Perl",
|
||||
"author" : [
|
||||
"Tim Bunce (dbi-users@perl.org)"
|
||||
],
|
||||
"dynamic_config" : 1,
|
||||
"generated_by" : "ExtUtils::MakeMaker version 7.24, CPAN::Meta::Converter version 2.150010",
|
||||
"license" : [
|
||||
"perl_5"
|
||||
],
|
||||
"meta-spec" : {
|
||||
"url" : "http://search.cpan.org/perldoc?CPAN::Meta::Spec",
|
||||
"version" : "2"
|
||||
},
|
||||
"name" : "DBI",
|
||||
"no_index" : {
|
||||
"directory" : [
|
||||
"t",
|
||||
"inc"
|
||||
]
|
||||
},
|
||||
"prereqs" : {
|
||||
"build" : {
|
||||
"requires" : {
|
||||
"ExtUtils::MakeMaker" : "6.48",
|
||||
"Test::Simple" : "0.90"
|
||||
}
|
||||
},
|
||||
"configure" : {
|
||||
"requires" : {
|
||||
"ExtUtils::MakeMaker" : "0"
|
||||
}
|
||||
},
|
||||
"runtime" : {
|
||||
"conflicts" : {
|
||||
"DBD::Amazon" : "0.10",
|
||||
"DBD::AnyData" : "0.110",
|
||||
"DBD::CSV" : "0.36",
|
||||
"DBD::Google" : "0.51",
|
||||
"DBD::PO" : "2.10",
|
||||
"DBD::RAM" : "0.072",
|
||||
"SQL::Statement" : "1.33"
|
||||
},
|
||||
"requires" : {
|
||||
"perl" : "5.008001"
|
||||
}
|
||||
}
|
||||
},
|
||||
"release_status" : "stable",
|
||||
"resources" : {
|
||||
"homepage" : "http://dbi.perl.org/",
|
||||
"license" : [
|
||||
"http://dev.perl.org/licenses/"
|
||||
],
|
||||
"repository" : {
|
||||
"url" : "https://github.com/perl5-dbi/dbi"
|
||||
},
|
||||
"x_IRC" : "irc://irc.perl.org/#dbi",
|
||||
"x_MailingList" : "mailto:dbi-dev@perl.org"
|
||||
},
|
||||
"version" : "1.643",
|
||||
"x_serialization_backend" : "JSON::PP version 2.27400_02",
|
||||
"x_suggests" : {
|
||||
"Clone" : 0.34,
|
||||
"DB_File" : 0,
|
||||
"MLDBM" : 0,
|
||||
"Net::Daemon" : 0,
|
||||
"RPC::PlServer" : 0.2001,
|
||||
"SQL::Statement" : 1.402
|
||||
}
|
||||
}
|
|
@ -0,0 +1,45 @@
|
|||
---
|
||||
abstract: 'Database independent interface for Perl'
|
||||
author:
|
||||
- 'Tim Bunce (dbi-users@perl.org)'
|
||||
build_requires:
|
||||
ExtUtils::MakeMaker: '6.48'
|
||||
Test::Simple: '0.90'
|
||||
configure_requires:
|
||||
ExtUtils::MakeMaker: '0'
|
||||
conflicts:
|
||||
DBD::Amazon: '0.10'
|
||||
DBD::AnyData: '0.110'
|
||||
DBD::CSV: '0.36'
|
||||
DBD::Google: '0.51'
|
||||
DBD::PO: '2.10'
|
||||
DBD::RAM: '0.072'
|
||||
SQL::Statement: '1.33'
|
||||
dynamic_config: 1
|
||||
generated_by: 'ExtUtils::MakeMaker version 7.24, CPAN::Meta::Converter version 2.150010'
|
||||
license: perl
|
||||
meta-spec:
|
||||
url: http://module-build.sourceforge.net/META-spec-v1.4.html
|
||||
version: '1.4'
|
||||
name: DBI
|
||||
no_index:
|
||||
directory:
|
||||
- t
|
||||
- inc
|
||||
requires:
|
||||
perl: '5.008001'
|
||||
resources:
|
||||
IRC: irc://irc.perl.org/#dbi
|
||||
MailingList: mailto:dbi-dev@perl.org
|
||||
homepage: http://dbi.perl.org/
|
||||
license: http://dev.perl.org/licenses/
|
||||
repository: https://github.com/perl5-dbi/dbi
|
||||
version: '1.643'
|
||||
x_serialization_backend: 'CPAN::Meta::YAML version 0.018'
|
||||
x_suggests:
|
||||
Clone: 0.34
|
||||
DB_File: 0
|
||||
MLDBM: 0
|
||||
Net::Daemon: 0
|
||||
RPC::PlServer: 0.2001
|
||||
SQL::Statement: 1.402
|
|
@ -0,0 +1,361 @@
|
|||
# -*- perl -*-
|
||||
#
|
||||
# $Id$
|
||||
#
|
||||
# Copyright (c) 1994-2010 Tim Bunce Ireland
|
||||
#
|
||||
# See COPYRIGHT section in DBI.pm for usage and distribution rights.
|
||||
|
||||
use 5.008_001;
|
||||
|
||||
use ExtUtils::MakeMaker 5.16, qw(WriteMakefile $Verbose prompt);
|
||||
use Getopt::Long;
|
||||
use Config;
|
||||
use File::Find;
|
||||
use File::Spec;
|
||||
use strict;
|
||||
|
||||
use lib 'lib'; # for use DBI::DBD
|
||||
use DBI::DBD;
|
||||
|
||||
$| = 1;
|
||||
$^W = 1;
|
||||
my $os = $^O;
|
||||
my $osvers = $Config{osvers};
|
||||
$osvers =~ s/^\s*(\d+\.\d+).*/$1/; # drop sub-sub-version: 2.5.1 -> 2.5
|
||||
my $ext_pl = $^O eq 'VMS' ? '.pl' : '';
|
||||
my $is_developer = ((-d ".svn" || -d ".git") && -f "MANIFEST.SKIP");
|
||||
|
||||
$::opt_v = 0;
|
||||
$::opt_thread = $Config{useithreads}; # thread if we can, use "-nothread" to disable
|
||||
$::opt_g = 0;
|
||||
$::opt_g = 1 if $is_developer && $ENV{LOGNAME} && $ENV{LOGNAME} eq 'timbo'; # it's me! (probably)
|
||||
|
||||
GetOptions(qw(v! g! thread!))
|
||||
or die "Invalid arguments\n";
|
||||
|
||||
$::opt_g &&= '-g'; # convert to actual string
|
||||
|
||||
|
||||
if (($ENV{LANG}||'') =~ m/utf-?8/i) {
|
||||
print "\n";
|
||||
print "*** Your LANG environment variable is set to '$ENV{LANG}'\n";
|
||||
print "*** This may cause problems for some perl installations.\n";
|
||||
print "*** If you get test failures, please try again with LANG unset.\n";
|
||||
print "*** If that then works, please email dbi-dev\@perl.org with details\n";
|
||||
print "*** including the output of 'perl -V'\n";
|
||||
print "\n";
|
||||
sleep 1;
|
||||
}
|
||||
|
||||
my %opts = (
|
||||
NAME => 'DBI',
|
||||
AUTHOR => 'Tim Bunce (dbi-users@perl.org)',
|
||||
VERSION_FROM => 'DBI.pm',
|
||||
ABSTRACT_FROM => 'DBI.pm',
|
||||
MIN_PERL_VERSION => '5.008001',
|
||||
BUILD_REQUIRES => {
|
||||
'ExtUtils::MakeMaker' => '6.48',
|
||||
'Test::Simple' => '0.90',
|
||||
},
|
||||
META_MERGE => {
|
||||
resources => {
|
||||
repository => 'https://github.com/perl5-dbi/dbi',
|
||||
MailingList => 'mailto:dbi-dev@perl.org',
|
||||
license => 'http://dev.perl.org/licenses/',
|
||||
homepage => 'http://dbi.perl.org/',
|
||||
IRC => 'irc://irc.perl.org/#dbi',
|
||||
},
|
||||
suggests => {
|
||||
'RPC::PlServer' => 0.2001,
|
||||
'Net::Daemon' => 0,
|
||||
'SQL::Statement' => 1.402,
|
||||
'Clone' => 0.34,
|
||||
'MLDBM' => 0,
|
||||
'DB_File' => 0,
|
||||
},
|
||||
},
|
||||
PREREQ_PM => {
|
||||
( $^O eq 'MSWin32' ? ( 'File::Spec' => 3.31, ) : () ),
|
||||
},
|
||||
CONFLICTS => {
|
||||
'SQL::Statement' => '1.33',
|
||||
'DBD::AnyData' => '0.110',
|
||||
'DBD::CSV' => '0.36',
|
||||
'DBD::RAM' => '0.072',
|
||||
'DBD::PO' => '2.10',
|
||||
'DBD::Google' => '0.51',
|
||||
'DBD::Amazon' => '0.10',
|
||||
},
|
||||
LICENSE => 'perl',
|
||||
EXE_FILES => [ "dbiproxy$ext_pl", "dbiprof$ext_pl", "dbilogstrip$ext_pl" ],
|
||||
DIR => [ ],
|
||||
dynamic_lib => { OTHERLDFLAGS => "$::opt_g" },
|
||||
clean => { FILES=> "\$(DISTVNAME) Perl.xsi t/zv*_*.t dbi__null_test_tmp* test_output_*"
|
||||
." dbiproxy$ext_pl dbiprof$ext_pl dbilogstrip$ext_pl dbiproxy.*log dbitrace.log dbi*.prof ndtest.prt" },
|
||||
dist => {
|
||||
DIST_DEFAULT=> 'clean distcheck disttest tardist',
|
||||
PREOP => '$(MAKE) -f Makefile.old distdir',
|
||||
COMPRESS => 'gzip -v9', SUFFIX => 'gz',
|
||||
},
|
||||
);
|
||||
$opts{CAPI} = 'TRUE' if $Config{archname} =~ /-object\b/i;
|
||||
|
||||
if (my $gccversion = $Config{gccversion}) { # ask gcc to be more pedantic
|
||||
warn "WARNING: Your GNU C $gccversion compiler is very old. Please upgrade it and rebuild perl.\n"
|
||||
if $gccversion =~ m/^\D*(1|2\.[1-8])/;
|
||||
print "Your perl was compiled with gcc (version $Config{gccversion}), okay.\n";
|
||||
$gccversion =~ s/[^\d\.]//g; # just a number please
|
||||
$opts{DEFINE} .= ' -W -Wall -Wpointer-arith -Wbad-function-cast';
|
||||
$opts{DEFINE} .= ' -Wno-comment -Wno-sign-compare -Wno-cast-qual';
|
||||
$opts{DEFINE} .= ' -Wmissing-noreturn -Wno-unused-parameter' if $gccversion ge "3.0";
|
||||
if ($is_developer && $::opt_g) {
|
||||
$opts{DEFINE} .= ' -DPERL_GCC_PEDANTIC -ansi -pedantic' if $gccversion ge "3.0";
|
||||
$opts{DEFINE} .= ' -Wdisabled-optimization -Wformat' if $gccversion ge "3.0";
|
||||
$opts{DEFINE} .= ' -Wmissing-prototypes';
|
||||
}
|
||||
}
|
||||
|
||||
$opts{DEFINE} .= ' -DDBI_NO_THREADS' unless $::opt_thread;
|
||||
|
||||
# HP-UX 9 cannot link a non-PIC object file into a shared library.
|
||||
# Since the .a libs that Oracle supplies contain non-PIC object
|
||||
# files, we sadly have to build static on HP-UX 9 :(
|
||||
if ($os eq 'hpux' and $osvers < 10) {
|
||||
$opts{LINKTYPE} = 'static';
|
||||
print "Warning: Forced to build static not dynamic on $os $osvers.\a\n";
|
||||
print "** Note: DBI will be built *into* a NEW perl binary. You MUST use that new perl.\n";
|
||||
print " See README and Makefile.PL for more information.\a\n";
|
||||
}
|
||||
|
||||
if ($os eq 'MSWin32' && $Config{libs} =~ /\bPerlCRT.lib\b/
|
||||
&& -f "$Config{archlib}/CORE/PerlCRT.lib") {
|
||||
# ActiveState Perl needs this; should better be done in MakeMaker, but
|
||||
# as a temporary workaround it seems ok.
|
||||
$opts{LIBS} = "-L$Config{archlib}/CORE";
|
||||
}
|
||||
|
||||
# Set aside some values for post_initialize() in package MY
|
||||
my ( $cfg_privlibexp, $cfg_archlibexp, $cfg_sitelibexp, $cfg_sitearchexp,
|
||||
$cfg_man3direxp ) =
|
||||
@Config{qw( privlibexp archlibexp sitelibexp sitearchexp man3direxp ) };
|
||||
for ( $cfg_privlibexp, $cfg_archlibexp, $cfg_sitelibexp, $cfg_sitearchexp,
|
||||
$cfg_man3direxp ) {
|
||||
$_ = '' unless defined $_;
|
||||
}
|
||||
|
||||
my $conflictMsg = <<EOCM;
|
||||
***
|
||||
This version of DBI conflicts with the version of
|
||||
module %s (%s) you have installed.
|
||||
|
||||
It's strongly recommended that you update it after
|
||||
installing this version of DBI.
|
||||
***
|
||||
EOCM
|
||||
|
||||
sub CheckConflicts {
|
||||
my %params = @_;
|
||||
my %conflicts = %{ $params{CONFLICTS} };
|
||||
my $found = 0;
|
||||
|
||||
while ( my ( $module, $version ) = each(%conflicts) ) {
|
||||
undef $@;
|
||||
eval "require $module";
|
||||
next if $@;
|
||||
my $installed = eval "\$" . $module . "::VERSION";
|
||||
if ( $installed le $version ) {
|
||||
++$found;
|
||||
my $msg = $conflictMsg;
|
||||
my $warning = sprintf( $msg, $module, $installed );
|
||||
warn $warning;
|
||||
}
|
||||
}
|
||||
|
||||
return !$found;
|
||||
}
|
||||
|
||||
sub WriteMakefile1 {
|
||||
#Written by Alexandr Ciornii, version 0.21. Added by eumm-upgrade.
|
||||
my %params = @_;
|
||||
my $eumm_version = $ExtUtils::MakeMaker::VERSION;
|
||||
$eumm_version = eval $eumm_version;
|
||||
die "EXTRA_META is deprecated" if ( exists( $params{EXTRA_META} ) );
|
||||
die "License not specified" if ( !exists( $params{LICENSE} ) );
|
||||
if ( $params{BUILD_REQUIRES} and ( $eumm_version < 6.5503 ) ) {
|
||||
#EUMM 6.5502 has problems with BUILD_REQUIRES
|
||||
$params{PREREQ_PM} = { %{ $params{PREREQ_PM} || {} }, %{ $params{BUILD_REQUIRES} } };
|
||||
delete $params{BUILD_REQUIRES};
|
||||
}
|
||||
|
||||
# more or less taken from Moose' Makefile.PL
|
||||
if ( $params{CONFLICTS} ) {
|
||||
my $ok = CheckConflicts(%params);
|
||||
exit(0) if ( $params{PREREQ_FATAL} and not $ok );
|
||||
my $cpan_smoker = grep { $_ =~ m/(?:CR_SMOKER|CPAN_REPORTER|AUTOMATED_TESTING)/ } keys %ENV;
|
||||
unless ( $cpan_smoker || $ENV{PERL_MM_USE_DEFAULT} ) {
|
||||
sleep 4 unless ($ok);
|
||||
}
|
||||
%{$params{META_MERGE}{conflicts}} = %{$params{CONFLICTS}};
|
||||
delete $params{CONFLICTS};
|
||||
}
|
||||
|
||||
delete $params{CONFIGURE_REQUIRES} if ( $eumm_version < 6.52 );
|
||||
delete $params{MIN_PERL_VERSION} if ( $eumm_version < 6.48 );
|
||||
delete $params{META_MERGE} if ( $eumm_version < 6.46 );
|
||||
delete $params{META_ADD} if ( $eumm_version < 6.46 );
|
||||
delete $params{LICENSE} if ( $eumm_version < 6.31 );
|
||||
|
||||
WriteMakefile(%params);
|
||||
}
|
||||
|
||||
$Verbose = $::opt_v;
|
||||
WriteMakefile1(
|
||||
dbd_edit_mm_attribs(\%opts, {
|
||||
create_pp_tests => 1,
|
||||
create_nano_tests => 1,
|
||||
create_gap_tests => 1,
|
||||
})
|
||||
);
|
||||
# WriteMakefile call is last thing executed
|
||||
# so return value is propagated
|
||||
|
||||
|
||||
# =====================================================================
|
||||
|
||||
package MY;
|
||||
|
||||
sub postamble {
|
||||
warn <<EOT;
|
||||
|
||||
I see you're using perl $] on $Config::Config{archname}, okay.
|
||||
Remember to actually *read* the README file!
|
||||
Use 'make' to build the software (dmake or nmake on Windows).
|
||||
Then 'make test' to execute self tests.
|
||||
Then 'make install' to install the DBI and then delete this working
|
||||
directory before unpacking and building any DBD::* drivers.
|
||||
|
||||
EOT
|
||||
warn <<EOT if $os eq 'MSWin32';
|
||||
Windows users need to use the correct make command.
|
||||
That may be nmake or dmake depending on which Perl you are using.
|
||||
If using the Win32 ActiveState build then it is recommended that you
|
||||
use the ppm utility to fetch and install a prebuilt DBI instead.
|
||||
|
||||
EOT
|
||||
return "";
|
||||
}
|
||||
|
||||
sub libscan {
|
||||
my($self, $path) = @_;
|
||||
($path =~ /\~$|\B\.(svn|git)\b/) ? undef : $path;
|
||||
}
|
||||
|
||||
sub const_cccmd {
|
||||
my $self = shift;
|
||||
local($_) = $self->SUPER::const_cccmd(@_);
|
||||
# If perl Makefile.PL *-g* then switch on debugging
|
||||
if ($::opt_g) {
|
||||
s/\s-O\d?\b//; # delete optimise option
|
||||
s/\s-/ -g -/; # add -g option
|
||||
}
|
||||
$_;
|
||||
}
|
||||
|
||||
|
||||
sub post_initialize {
|
||||
my($self) = shift;
|
||||
|
||||
if ($cfg_privlibexp ne $cfg_sitelibexp) {
|
||||
# this block could probably be removed now
|
||||
my %old;
|
||||
File::Find::find( sub {
|
||||
local $_ = $File::Find::name;
|
||||
s:\\:/:g if $os eq 'MSWin32';
|
||||
$File::Find::prune = 1, return
|
||||
if -d $_ && ( $_ eq $cfg_sitelibexp ||
|
||||
$_ eq $cfg_sitearchexp ||
|
||||
$_ eq $cfg_man3direxp );
|
||||
++$old{$_} if m:\bDB(I|D$):; # DBI files, but just DBD dirs
|
||||
}, $cfg_privlibexp, $cfg_archlibexp );
|
||||
if ( %old ) {
|
||||
warn "
|
||||
Warning: By default new modules are installed into your 'site_lib'
|
||||
directories. Since site_lib directories come after the normal library
|
||||
directories you must delete old DBI files and directories from your
|
||||
'privlib' and 'archlib' directories and their auto subdirectories.
|
||||
|
||||
Reinstall DBI and your DBD::* drivers after deleting the old directories.
|
||||
|
||||
Here's a list of probable old files and directories:
|
||||
|
||||
" . join( "\n ", ( sort keys %old ), "\n" );
|
||||
}
|
||||
}
|
||||
|
||||
# install files that DBD's may need
|
||||
File::Find::find( sub {
|
||||
|
||||
# may be '.' or '[]' depending on File::Find version
|
||||
$_ = '.' if $^O eq 'VMS' && $_ eq File::Spec->curdir;
|
||||
|
||||
$File::Find::prune = 1, return if -d $_ && '.' ne $_;
|
||||
$self->{PM}->{$_} = File::Spec->catfile($self->{INST_ARCHAUTODIR}, $_)
|
||||
if '.h' eq substr( $_, -2 ) || '.xst' eq substr( $_, -4 );
|
||||
}, '.' );
|
||||
|
||||
delete $self->{$_}{"git-svn-vsn.pl"} for qw( PM MAN3PODS );
|
||||
|
||||
return '';
|
||||
}
|
||||
|
||||
|
||||
sub post_constants {
|
||||
my($self) = shift;
|
||||
|
||||
# ensure that Driver.xst and related code gets tested
|
||||
my $xst = main::dbd_postamble();
|
||||
$xst =~ s/\$\(BASEEXT\)/Perl/g;
|
||||
$xst .= '
|
||||
dbixs_rev.h: DBIXS.h Driver_xst.h dbipport.h dbivport.h dbixs_rev.pl
|
||||
$(PERL) dbixs_rev.pl
|
||||
|
||||
DBI.c: Perl$(OBJ_EXT)
|
||||
|
||||
# make Changes file available as installed pod docs "perldoc DBI::Changes"
|
||||
inst_libdbi = ' . File::Spec->catdir($self->{INST_LIB}, 'DBI') . '
|
||||
changes_pm = ' . File::Spec->catfile($self->{INST_LIB}, 'DBI', 'Changes.pm') . '
|
||||
'.q{
|
||||
|
||||
config :: $(changes_pm)
|
||||
$(NOECHO) $(NOOP)
|
||||
|
||||
$(changes_pm): Changes
|
||||
$(MKPATH) $(inst_libdbi)
|
||||
$(RM_F) $(changes_pm)
|
||||
$(CP) Changes $(changes_pm)
|
||||
|
||||
ptest: all
|
||||
prove --blib --jobs 8 --shuffle
|
||||
|
||||
faq:
|
||||
: checkin any local changes not already checked in before overwriting
|
||||
svn commit --message "dbi.tiddlyspot.com FAQ update" dbi.tiddlyspot.com.html
|
||||
wget --ignore-length --output-document=dbi.tiddlyspot.com.html --timestamping http://dbi.tiddlyspot.com/download
|
||||
svn commit --message "dbi.tiddlyspot.com FAQ update" dbi.tiddlyspot.com.html
|
||||
|
||||
checkkeywords:
|
||||
$(RM_RF) blib
|
||||
find . -type f \( -name .svn -prune -o -name \*.pm -o -name \*.PL -o -name \*.pl \) \
|
||||
-exec bash -c '[ -z "$$(svn pg svn:keywords {})" ] && echo svn propset svn:keywords \"Id Revision\" {}' \;
|
||||
|
||||
checkpod:
|
||||
$(RM_RF) blib
|
||||
find . -type f \( -name .svn -prune -o -name \*.pm -o -name \*.PL -o -name \*.pl \) \
|
||||
-exec podchecker {} \; 2>&1 | grep -v 'pod syntax OK'
|
||||
};
|
||||
|
||||
return $xst;
|
||||
}
|
||||
|
||||
# end.
|
|
@ -0,0 +1,54 @@
|
|||
/* This is a skeleton driver that only serves as a basic sanity check
|
||||
   that the Driver.xst mechanism doesn't have compile-time errors in it.
|
||||
vim: ts=8:sw=4:expandtab
|
||||
*/
|
||||
|
||||
#define PERL_NO_GET_CONTEXT
|
||||
#include "DBIXS.h"
|
||||
#include "dbd_xsh.h"
|
||||
|
||||
#undef DBIh_SET_ERR_CHAR /* to syntax check emulation */
|
||||
#include "dbivport.h"
|
||||
|
||||
DBISTATE_DECLARE;
|
||||
|
||||
|
||||
struct imp_drh_st {
|
||||
dbih_drc_t com; /* MUST be first element in structure */
|
||||
};
|
||||
struct imp_dbh_st {
|
||||
dbih_dbc_t com; /* MUST be first element in structure */
|
||||
};
|
||||
struct imp_sth_st {
|
||||
dbih_stc_t com; /* MUST be first element in structure */
|
||||
};
|
||||
|
||||
|
||||
|
||||
#define dbd_discon_all(drh, imp_drh) (drh=drh,imp_drh=imp_drh,1)
|
||||
#define dbd_dr_data_sources(drh, imp_drh, attr) (drh=drh,imp_drh=imp_drh,attr=attr,Nullav)
|
||||
#define dbd_db_do4_iv(dbh,imp_dbh,p3,p4) (dbh=dbh,imp_dbh=imp_dbh,(void*)p3,p4=p4,-2)
|
||||
#define dbd_db_last_insert_id(dbh, imp_dbh, p3,p4,p5,p6, attr) \
|
||||
(dbh=dbh,imp_dbh=imp_dbh,p3=p3,p4=p4,p5=p5,p6=p6,attr=attr,&PL_sv_undef)
|
||||
#define dbd_take_imp_data(h, imp_xxh, p3) (h=h,imp_xxh=imp_xxh,&PL_sv_undef)
|
||||
#define dbd_st_execute_for_fetch(sth, imp_sth, p3, p4) \
|
||||
(sth=sth,imp_sth=imp_sth,p3=p3,p4=p4,&PL_sv_undef)
|
||||
|
||||
#define dbd_st_bind_col(sth, imp_sth, param, ref, sql_type, attribs) \
|
||||
(sth=sth,imp_sth=imp_sth,param=param,ref=ref,sql_type=sql_type,attribs=attribs,1)
|
||||
|
||||
int /* just to test syntax of macros etc */
|
||||
dbd_st_rows(SV *h, imp_sth_t *imp_sth)
|
||||
{
|
||||
dTHX;
|
||||
PERL_UNUSED_VAR(h);
|
||||
DBIh_SET_ERR_CHAR(h, imp_sth, 0, 1, "err msg", "12345", Nullch);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
MODULE = DBD::Perl PACKAGE = DBD::Perl
|
||||
|
||||
INCLUDE: Perl.xsi
|
||||
|
||||
# vim:sw=4:ts=8
|
|
@ -0,0 +1,93 @@
|
|||
# DBI - The Perl Database Interface.
|
||||
|
||||
[](http://travis-ci.org/perl5-dbi/dbi/)
|
||||
|
||||
See [COPYRIGHT](https://metacpan.org/module/DBI#COPYRIGHT)
|
||||
section in DBI.pm for usage and distribution rights.
|
||||
|
||||
See [GETTING HELP](https://metacpan.org/module/DBI#GETTING-HELP)
|
||||
section in DBI.pm for how to get help.
|
||||
|
||||
# QUICK START GUIDE:
|
||||
|
||||
The DBI requires one or more 'driver' modules to talk to databases,
|
||||
but they are not needed to build or install the DBI.
|
||||
|
||||
Check that a DBD::* module exists for the database you wish to use.
|
||||
|
||||
Install the DBI using an installer like cpanm, cpanplus, cpan,
or whatever is recommended by the perl distribution you're using.
|
||||
Make sure the DBI tests run successfully before installing.
|
||||
|
||||
Use the 'perldoc DBI' command to read the DBI documentation.
|
||||
|
||||
Install the DBD::* driver module you wish to use in the same way.
|
||||
It is often important to read the driver README file carefully.
|
||||
Make sure the driver tests run successfully before installing.
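For example, with cpanm (assuming you have it installed; DBD::SQLite below is
only an illustration, use whichever driver your database needs):

    cpanm DBI
    cpanm DBD::SQLite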
|
||||
|
||||
The DBI.pm file contains the DBI specification and other documentation.
|
||||
PLEASE READ IT. It'll save you asking questions on the mailing list
|
||||
which you will be told are already answered in the documentation.
|
||||
|
||||
For more information and to keep informed about progress you can join
|
||||
the dbi-users mailing list via mailto:dbi-users-help@perl.org
|
||||
You can post to the mailing list without subscribing. (Your first post may be
|
||||
delayed a day or so while it's being moderated.)
|
||||
|
||||
To help you make the best use of the dbi-users mailing list,
|
||||
and any other lists or forums you may use, I strongly
|
||||
recommend that you read "How To Ask Questions The Smart Way"
|
||||
by Eric Raymond:
|
||||
|
||||
http://www.catb.org/~esr/faqs/smart-questions.html
|
||||
|
||||
Much useful information and online archives of the mailing lists can be
|
||||
found at http://dbi.perl.org/
|
||||
|
||||
See also http://metacpan.org/
|
||||
|
||||
|
||||
# IF YOU HAVE PROBLEMS:
|
||||
|
||||
First, read the notes in the INSTALL file.
|
||||
|
||||
If you can't fix it yourself, please post details to dbi-users@perl.org.
|
||||
Please include:
|
||||
|
||||
1. A complete log of a complete build, e.g.:
|
||||
|
||||
perl Makefile.PL (do a make realclean first)
|
||||
make
|
||||
make test
|
||||
make test TEST_VERBOSE=1 (if any of the t/* tests fail)
|
||||
|
||||
2. The output of perl -V
|
||||
|
||||
3. If you get a core dump, try to include a stack trace from it.
|
||||
|
||||
Try installing the Devel::CoreStack module to get a stack trace.
|
||||
If the stack trace mentions XS_DynaLoader_dl_load_file then rerun
|
||||
make test after setting the environment variable PERL_DL_DEBUG to 2.
|
||||
|
||||
4. If your installation succeeds, but your script does not behave
|
||||
as you expect, the problem is possibly in your script.
|
||||
|
||||
Before sending to dbi-users, try writing a small, easy to use test case to
|
||||
reproduce your problem. Also, use the DBI->trace method to trace your
|
||||
database calls.
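A minimal self-contained test case might look like this sketch (DBD::SQLite and
the table name are only illustrative; substitute your own driver and schema):

    #!/usr/bin/perl
    use strict;
    use warnings;
    use DBI;

    DBI->trace(2, "dbitrace.log");   # write level-2 trace output to dbitrace.log

    my $dbh = DBI->connect("dbi:SQLite:dbname=test.db", "", "",
                           { RaiseError => 1, PrintError => 0 });
    $dbh->do("CREATE TABLE IF NOT EXISTS t (id INTEGER, name TEXT)");
    $dbh->do("INSERT INTO t (id, name) VALUES (?, ?)", undef, 1, "alpha");
    my $rows = $dbh->selectall_arrayref("SELECT id, name FROM t");
    printf "got %d row(s)\n", scalar @$rows;
    $dbh->disconnect;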
|
||||
|
||||
Please don't post problems to usenet, google groups or perl5-porters.
|
||||
This software is supported via the dbi-users mailing list. For more
|
||||
information and to keep informed about progress you can join the
|
||||
mailing list via mailto:dbi-users-help@perl.org
|
||||
(please note that I do not run or manage the mailing list).
|
||||
|
||||
It is important to check that you are using the latest version before
|
||||
posting. If you're not then we're very likely to simply say "upgrade to
|
||||
the latest". You would do yourself a favour by upgrading beforehand.
|
||||
|
||||
Please remember that we're all busy. Try to help yourself first,
|
||||
then try to help us help you by following these guidelines carefully.
|
||||
|
||||
Regards,
|
||||
Tim Bunce and the perl5-dbi team.
|
|
@ -0,0 +1,63 @@
|
|||
/* @(#)$Id$
|
||||
*
|
||||
* Copyright 2000-2002 Tim Bunce
|
||||
* Copyright 2002 Jonathan Leffler
|
||||
*
|
||||
* These prototypes are for dbdimp.c funcs used in the XS file.
|
||||
* These names are #defined to driver specific names by the
|
||||
* dbdimp.h file in the driver source.
|
||||
*/
|
||||
|
||||
#ifndef DBI_DBD_XSH_H
|
||||
#define DBI_DBD_XSH_H
|
||||
|
||||
void dbd_init _((dbistate_t *dbistate));
|
||||
|
||||
int dbd_discon_all _((SV *drh, imp_drh_t *imp_drh));
|
||||
SV *dbd_take_imp_data _((SV *h, imp_xxh_t *imp_xxh, void *foo));
|
||||
|
||||
/* Support for dbd_dr_data_sources and dbd_db_do added to Driver.xst in DBI v1.33 */
|
||||
/* dbd_dr_data_sources: optional: defined by a driver that calls a C */
|
||||
/* function to get the list of data sources */
|
||||
AV *dbd_dr_data_sources(SV *drh, imp_drh_t *imp_drh, SV *attrs);
|
||||
|
||||
int dbd_db_login6_sv _((SV *dbh, imp_dbh_t *imp_dbh, SV *dbname, SV *uid, SV *pwd, SV*attribs));
|
||||
int dbd_db_login6 _((SV *dbh, imp_dbh_t *imp_dbh, char *dbname, char *uid, char *pwd, SV*attribs)); /* deprecated */
|
||||
int dbd_db_login _((SV *dbh, imp_dbh_t *imp_dbh, char *dbname, char *uid, char *pwd)); /* deprecated */
|
||||
/* Note: interface of dbd_db_do changed in v1.33 */
|
||||
/* Old prototype: dbd_db_do _((SV *sv, char *statement)); */
|
||||
/* dbd_db_do: optional: defined by a driver if the DBI default version is too slow */
|
||||
int dbd_db_do4 _((SV *dbh, imp_dbh_t *imp_dbh, char *statement, SV *params)); /* deprecated */
|
||||
IV dbd_db_do4_iv _((SV *dbh, imp_dbh_t *imp_dbh, char *statement, SV *params)); /* deprecated */
|
||||
IV dbd_db_do6 _((SV *dbh, imp_dbh_t *imp_dbh, SV *statement, SV *params, I32 items, I32 ax));
|
||||
int dbd_db_commit _((SV *dbh, imp_dbh_t *imp_dbh));
|
||||
int dbd_db_rollback _((SV *dbh, imp_dbh_t *imp_dbh));
|
||||
int dbd_db_disconnect _((SV *dbh, imp_dbh_t *imp_dbh));
|
||||
void dbd_db_destroy _((SV *dbh, imp_dbh_t *imp_dbh));
|
||||
int dbd_db_STORE_attrib _((SV *dbh, imp_dbh_t *imp_dbh, SV *keysv, SV *valuesv));
|
||||
SV *dbd_db_FETCH_attrib _((SV *dbh, imp_dbh_t *imp_dbh, SV *keysv));
|
||||
SV *dbd_db_last_insert_id _((SV *dbh, imp_dbh_t *imp_dbh, SV *catalog, SV *schema, SV *table, SV *field, SV *attr));
|
||||
AV *dbd_db_data_sources _((SV *dbh, imp_dbh_t *imp_dbh, SV *attr));
|
||||
|
||||
int dbd_st_prepare _((SV *sth, imp_sth_t *imp_sth, char *statement, SV *attribs)); /* deprecated */
|
||||
int dbd_st_prepare_sv _((SV *sth, imp_sth_t *imp_sth, SV *statement, SV *attribs));
|
||||
int dbd_st_rows _((SV *sth, imp_sth_t *imp_sth)); /* deprecated */
|
||||
IV dbd_st_rows_iv _((SV *sth, imp_sth_t *imp_sth));
|
||||
int dbd_st_execute _((SV *sth, imp_sth_t *imp_sth)); /* deprecated */
|
||||
IV dbd_st_execute_iv _((SV *sth, imp_sth_t *imp_sth));
|
||||
SV *dbd_st_last_insert_id _((SV *sth, imp_sth_t *imp_sth, SV *catalog, SV *schema, SV *table, SV *field, SV *attr));
|
||||
AV *dbd_st_fetch _((SV *sth, imp_sth_t *imp_sth));
|
||||
int dbd_st_finish3 _((SV *sth, imp_sth_t *imp_sth, int from_destroy));
|
||||
int dbd_st_finish _((SV *sth, imp_sth_t *imp_sth)); /* deprecated */
|
||||
void dbd_st_destroy _((SV *sth, imp_sth_t *imp_sth));
|
||||
int dbd_st_blob_read _((SV *sth, imp_sth_t *imp_sth,
|
||||
int field, long offset, long len, SV *destrv, long destoffset));
|
||||
int dbd_st_STORE_attrib _((SV *sth, imp_sth_t *imp_sth, SV *keysv, SV *valuesv));
|
||||
SV *dbd_st_FETCH_attrib _((SV *sth, imp_sth_t *imp_sth, SV *keysv));
|
||||
SV *dbd_st_execute_for_fetch _((SV *sth, imp_sth_t *imp_sth, SV *fetch_tuple_sub, SV *tuple_status));
|
||||
|
||||
int dbd_bind_ph _((SV *sth, imp_sth_t *imp_sth,
|
||||
SV *param, SV *value, IV sql_type, SV *attribs,
|
||||
int is_inout, IV maxlen));
|
||||
|
||||
#endif /* end of dbd_xsh.h */
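/* For orientation, a rough sketch of how common DBI-level calls map onto the
 * driver entry points declared above (handle, table and column names are
 * hypothetical; exact dispatch varies by driver and DBI version):
 *
 *     use DBI qw(:sql_types);
 *     my $sth = $dbh->prepare("SELECT name FROM t WHERE id = ?");  # dbd_st_prepare_sv
 *     $sth->bind_param(1, 42, SQL_INTEGER);                        # dbd_bind_ph
 *     $sth->execute;                                               # dbd_st_execute(_iv)
 *     while (my @row = $sth->fetchrow_array) {                     # dbd_st_fetch
 *         print "$row[0]\n";
 *     }
 *     $sth->finish;                                                # dbd_st_finish(3)
 */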
|
|
@ -0,0 +1,96 @@
/* $Id$
 *
 * Copyright (c) 1997,1998,1999 Tim Bunce England
 *
 * See COPYRIGHT section in DBI.pm for usage and distribution rights.
 */


/* Some core SQL CLI standard (ODBC) declarations */

#ifndef SQL_SUCCESS     /* don't clash with ODBC based drivers */

/* SQL datatype codes */
#define SQL_GUID                          (-11)
#define SQL_WLONGVARCHAR                  (-10)
#define SQL_WVARCHAR                       (-9)
#define SQL_WCHAR                          (-8)
#define SQL_BIT                            (-7)
#define SQL_TINYINT                        (-6)
#define SQL_BIGINT                         (-5)
#define SQL_LONGVARBINARY                  (-4)
#define SQL_VARBINARY                      (-3)
#define SQL_BINARY                         (-2)
#define SQL_LONGVARCHAR                    (-1)
#define SQL_UNKNOWN_TYPE                     0
#define SQL_ALL_TYPES                        0
#define SQL_CHAR                             1
#define SQL_NUMERIC                          2
#define SQL_DECIMAL                          3
#define SQL_INTEGER                          4
#define SQL_SMALLINT                         5
#define SQL_FLOAT                            6
#define SQL_REAL                             7
#define SQL_DOUBLE                           8
#define SQL_DATETIME                         9
#define SQL_DATE                             9
#define SQL_INTERVAL                        10
#define SQL_TIME                            10
#define SQL_TIMESTAMP                       11
#define SQL_VARCHAR                         12
#define SQL_BOOLEAN                         16
#define SQL_UDT                             17
#define SQL_UDT_LOCATOR                     18
#define SQL_ROW                             19
#define SQL_REF                             20
#define SQL_BLOB                            30
#define SQL_BLOB_LOCATOR                    31
#define SQL_CLOB                            40
#define SQL_CLOB_LOCATOR                    41
#define SQL_ARRAY                           50
#define SQL_ARRAY_LOCATOR                   51
#define SQL_MULTISET                        55
#define SQL_MULTISET_LOCATOR                56
#define SQL_TYPE_DATE                       91
#define SQL_TYPE_TIME                       92
#define SQL_TYPE_TIMESTAMP                  93
#define SQL_TYPE_TIME_WITH_TIMEZONE         94
#define SQL_TYPE_TIMESTAMP_WITH_TIMEZONE    95
#define SQL_INTERVAL_YEAR                  101
#define SQL_INTERVAL_MONTH                 102
#define SQL_INTERVAL_DAY                   103
#define SQL_INTERVAL_HOUR                  104
#define SQL_INTERVAL_MINUTE                105
#define SQL_INTERVAL_SECOND                106
#define SQL_INTERVAL_YEAR_TO_MONTH         107
#define SQL_INTERVAL_DAY_TO_HOUR           108
#define SQL_INTERVAL_DAY_TO_MINUTE         109
#define SQL_INTERVAL_DAY_TO_SECOND         110
#define SQL_INTERVAL_HOUR_TO_MINUTE        111
#define SQL_INTERVAL_HOUR_TO_SECOND        112
#define SQL_INTERVAL_MINUTE_TO_SECOND      113


/* Main return codes */
#define SQL_ERROR                          (-1)
#define SQL_SUCCESS                          0
#define SQL_SUCCESS_WITH_INFO                1
#define SQL_NO_DATA_FOUND                  100

/*
 * for ODBC SQL Cursor Types
 */
#define SQL_CURSOR_FORWARD_ONLY            0UL
#define SQL_CURSOR_KEYSET_DRIVEN           1UL
#define SQL_CURSOR_DYNAMIC                 2UL
#define SQL_CURSOR_STATIC                  3UL
#define SQL_CURSOR_TYPE_DEFAULT            SQL_CURSOR_FORWARD_ONLY

#endif /* SQL_SUCCESS */

/* Handy macro for testing for success and success with info. */
/* BEWARE that this macro can have side effects since rc appears twice! */
/* So DONT use it as if(SQL_ok(func(...))) { ... } */
#define SQL_ok(rc)      ((rc)==SQL_SUCCESS || (rc)==SQL_SUCCESS_WITH_INFO)


/* end of dbi_sql.h */
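These type codes are mirrored on the Perl side by the DBI C<:sql_types> export tag, so the same values can be used when declaring placeholder types. A minimal sketch (the statement and the bound "." directory assume the bundled DBD::ExampleP driver):

    use DBI qw(:sql_types);

    my $dbh = DBI->connect("dbi:ExampleP:", "", "", { RaiseError => 1 });
    my $sth = $dbh->prepare("select size, name from ?");
    # declare the placeholder type explicitly with a SQL_* constant
    $sth->bind_param(1, ".", { TYPE => SQL_VARCHAR });
    $sth->execute;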
@ -0,0 +1,71 @@
# -*- perl -*-

my $file = $ARGV[0] || 'dbilogstrip';

my $script = <<'SCRIPT';
~startperl~

=head1 NAME

dbilogstrip - filter to normalize DBI trace logs for diff'ing

=head1 SYNOPSIS

Read DBI trace file C<dbitrace.log> and write out a stripped version to C<dbitrace_stripped.log>:

  dbilogstrip dbitrace.log > dbitrace_stripped.log

Run C<yourscript.pl> twice, each with different sets of arguments, with
DBI_TRACE enabled. Filter the output and trace through C<dbilogstrip> into a
separate file for each run. Then compare using diff. (This example assumes
you're using a standard shell.)

  DBI_TRACE=2 perl yourscript.pl ...args1... 2>&1 | dbilogstrip > dbitrace1.log
  DBI_TRACE=2 perl yourscript.pl ...args2... 2>&1 | dbilogstrip > dbitrace2.log
  diff -u dbitrace1.log dbitrace2.log

=head1 DESCRIPTION

Replaces any hex addresses, e.g., C<0x128f72ce>, with C<0xN>.

Replaces any references to process id or thread id, like C<pid#6254>, with C<pidN>.

So a DBI trace line like this:

  -> STORE for DBD::DBM::st (DBI::st=HASH(0x19162a0)~0x191f9c8 'f_params' ARRAY(0x1922018)) thr#1800400

will look like this:

  -> STORE for DBD::DBM::st (DBI::st=HASH(0xN)~0xN 'f_params' ARRAY(0xN)) thrN

=cut

use strict;

while (<>) {
    # normalize hex addresses: 0xDEADHEAD => 0xN
    s/ \b 0x [0-9a-f]+ /0xN/gx;
    # normalize process and thread id number
    s/ \b (pid|tid|thr) \W? \d+ /${1}N/gx;

} continue {
    print or die "-p destination: $!\n";
}

SCRIPT

require Config;
my $config = {};
$config->{'startperl'} = $Config::Config{'startperl'};

$script =~ s/\~(\w+)\~/$config->{$1}/eg;
if (!(open(FILE, ">$file")) ||
    !(print FILE $script)   ||
    !(close(FILE))) {
    die "Error while writing $file: $!\n";
}
chmod 0755, $file;
print "Extracted $file from ",__FILE__," with variable substitutions.\n";
# syntax check resulting file, but only for developers
exit 1 if -d ".svn" and system($^X, '-wc', '-Mblib', $file) != 0;
File diff suppressed because it is too large
@ -0,0 +1,287 @@
|
|||
# -*- perl -*-
|
||||
|
||||
my $file = $ARGV[0] || 'dbiprof';
|
||||
|
||||
my $script = <<'SCRIPT';
|
||||
~startperl~
|
||||
|
||||
use strict;
|
||||
|
||||
my $VERSION = sprintf("1.%06d", q$Revision$ =~ /(\d+)/o);
|
||||
|
||||
use Data::Dumper;
|
||||
use DBI::ProfileData;
|
||||
use Getopt::Long;
|
||||
|
||||
# default options
|
||||
my $number = 10;
|
||||
my $sort = 'total';
|
||||
my $filename = 'dbi.prof';
|
||||
my $reverse = 0;
|
||||
my $case_sensitive = 0;
|
||||
my (%match, %exclude);
|
||||
|
||||
# get options from command line
|
||||
GetOptions(
|
||||
'version' => sub { die "dbiprof $VERSION\n" },
|
||||
'help' => sub { exit usage() },
|
||||
'number=i' => \$number,
|
||||
'sort=s' => \$sort,
|
||||
'dumpnodes!' => \my $dumpnodes,
|
||||
'reverse' => \$reverse,
|
||||
'match=s' => \%match,
|
||||
'exclude=s' => \%exclude,
|
||||
'case-sensitive' => \$case_sensitive,
|
||||
'delete!' => \my $opt_delete,
|
||||
) or exit usage();
|
||||
|
||||
sub usage {
|
||||
print <<EOS;
|
||||
dbiprof [options] [files]
|
||||
|
||||
Reads and merges DBI profile data from files and prints a summary.
|
||||
|
||||
files: defaults to $filename
|
||||
|
||||
options:
|
||||
|
||||
-number=N show top N, defaults to $number
|
||||
-sort=S sort by S, defaults to $sort
|
||||
-reverse reverse the sort
|
||||
-match=K=V for filtering, see docs
|
||||
-exclude=K=V for filtering, see docs
|
||||
-case-sensitive for -match and -exclude
|
||||
-delete rename files before reading then delete afterwards
|
||||
-version print version number and exit
|
||||
-help print this help
|
||||
|
||||
EOS
|
||||
return 1;
|
||||
}
|
||||
|
||||
# list of files defaults to dbi.prof
|
||||
my @files = @ARGV ? @ARGV : ('dbi.prof');
|
||||
|
||||
|
||||
# instantiate ProfileData object
|
||||
my $prof = eval {
|
||||
DBI::ProfileData->new(
|
||||
Files => \@files,
|
||||
DeleteFiles => $opt_delete,
|
||||
);
|
||||
};
|
||||
die "Unable to load profile data: $@\n" if $@;
|
||||
|
||||
if (%match) { # handle matches
|
||||
while (my ($key, $val) = each %match) {
|
||||
if ($val =~ m!^/(.+)/$!) {
|
||||
$val = $case_sensitive ? qr/$1/ : qr/$1/i;
|
||||
}
|
||||
$prof->match($key, $val, case_sensitive => $case_sensitive);
|
||||
}
|
||||
}
|
||||
|
||||
if (%exclude) { # handle excludes
|
||||
while (my ($key, $val) = each %exclude) {
|
||||
if ($val =~ m!^/(.+)/$!) {
|
||||
$val = $case_sensitive ? qr/$1/ : qr/$1/i;
|
||||
}
|
||||
$prof->exclude($key, $val, case_sensitive => $case_sensitive);
|
||||
}
|
||||
}
|
||||
|
||||
# sort the data
|
||||
$prof->sort(field => $sort, reverse => $reverse);
|
||||
|
||||
# all done, print it out
|
||||
if ($dumpnodes) {
|
||||
$Data::Dumper::Indent = 1;
|
||||
$Data::Dumper::Terse = 1;
|
||||
$Data::Dumper::Useqq = 1;
|
||||
$Data::Dumper::Deparse = 0;
|
||||
print Dumper($prof->nodes);
|
||||
}
|
||||
else {
|
||||
print $prof->report(number => $number);
|
||||
}
|
||||
exit 0;
|
||||
|
||||
__END__
|
||||
|
||||
=head1 NAME
|
||||
|
||||
dbiprof - command-line client for DBI::ProfileData
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
See a report of the ten queries with the longest total runtime in the
|
||||
profile dump file F<prof1.out>:
|
||||
|
||||
dbiprof prof1.out
|
||||
|
||||
See the top 10 most frequently run queries in the profile file
|
||||
F<dbi.prof> (the default):
|
||||
|
||||
dbiprof --sort count
|
||||
|
||||
See the same report with 15 entries:
|
||||
|
||||
dbiprof --sort count --number 15
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
This tool is a command-line client for the DBI::ProfileData. It
|
||||
allows you to analyze the profile data file produced by
|
||||
DBI::ProfileDumper and produce various useful reports.
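For example, a profile file can be produced by pointing the C<DBI_PROFILE> environment variable at L<DBI::ProfileDumper> before running your program, and then summarised with dbiprof (the Path spec and script name are purely illustrative):

  DBI_PROFILE='!Statement:!MethodName/DBI::ProfileDumper' perl yourscript.pl
  dbiprof --number 15 --sort count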
|
||||
|
||||
=head1 OPTIONS
|
||||
|
||||
This program accepts the following options:
|
||||
|
||||
=over 4
|
||||
|
||||
=item --number N
|
||||
|
||||
Produce this many items in the report. Defaults to 10. If set to
|
||||
"all" then all results are shown.
|
||||
|
||||
=item --sort field
|
||||
|
||||
Sort results by the given field. Sorting by multiple fields isn't currently
|
||||
supported (patches welcome). The available sort fields are:
|
||||
|
||||
=over 4
|
||||
|
||||
=item total
|
||||
|
||||
Sorts by total time run time across all runs. This is the default
|
||||
sort.
|
||||
|
||||
=item longest
|
||||
|
||||
Sorts by the longest single run.
|
||||
|
||||
=item count
|
||||
|
||||
Sorts by total number of runs.
|
||||
|
||||
=item first
|
||||
|
||||
Sorts by the time taken in the first run.
|
||||
|
||||
=item shortest
|
||||
|
||||
Sorts by the shortest single run.
|
||||
|
||||
=item key1
|
||||
|
||||
Sorts by the value of the first element in the Path, which should be numeric.
|
||||
You can also sort by C<key2> and C<key3>.
|
||||
|
||||
=back
|
||||
|
||||
=item --reverse
|
||||
|
||||
Reverses the selected sort. For example, to see a report of the
|
||||
shortest overall time:
|
||||
|
||||
dbiprof --sort total --reverse
|
||||
|
||||
=item --match keyN=value
|
||||
|
||||
Consider only items where the specified key matches the given value.
|
||||
Keys are numbered from 1. For example, let's say you used a
|
||||
DBI::Profile Path of:
|
||||
|
||||
[ DBIprofile_Statement, DBIprofile_Methodname ]
|
||||
|
||||
And called dbiprof as in:
|
||||
|
||||
dbiprof --match key2=execute
|
||||
|
||||
Your report would only show execute queries, leaving out prepares,
|
||||
fetches, etc.
|
||||
|
||||
If the value given starts and ends with slashes (C</>) then it will be
|
||||
treated as a regular expression. For example, to only include SELECT
|
||||
queries where key1 is the statement:
|
||||
|
||||
dbiprof --match key1=/^SELECT/
|
||||
|
||||
By default the match expression is matched case-insensitively, but
|
||||
this can be changed with the --case-sensitive option.
|
||||
|
||||
=item --exclude keyN=value
|
||||
|
||||
Remove items for where the specified key matches the given value. For
|
||||
example, to exclude all prepare entries where key2 is the method name:
|
||||
|
||||
dbiprof --exclude key2=prepare
|
||||
|
||||
Like C<--match>, If the value given starts and ends with slashes
|
||||
(C</>) then it will be treated as a regular expression. For example,
|
||||
to exclude UPDATE queries where key1 is the statement:
|
||||
|
||||
dbiprof --exclude key1=/^UPDATE/
|
||||
|
||||
By default the exclude expression is matched case-insensitively, but
|
||||
this can be changed with the --case-sensitive option.
|
||||
|
||||
=item --case-sensitive
|
||||
|
||||
Using this option causes --match and --exclude to work
|
||||
case-sensitively. Defaults to off.
|
||||
|
||||
=item --delete
|
||||
|
||||
Sets the C<DeleteFiles> option to L<DBI::ProfileData> which causes the
|
||||
files to be deleted after reading. See L<DBI::ProfileData> for more details.
|
||||
|
||||
=item --dumpnodes
|
||||
|
||||
Print the list of nodes in the form of a perl data structure.
|
||||
Use the C<-sort> option if you want the list sorted.
|
||||
|
||||
=item --version
|
||||
|
||||
Print the dbiprof version number and exit.
|
||||
|
||||
=back
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Sam Tregar <sam@tregar.com>
|
||||
|
||||
=head1 COPYRIGHT AND LICENSE
|
||||
|
||||
Copyright (C) 2002 Sam Tregar
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the same terms as Perl 5 itself.
|
||||
|
||||
=head1 SEE ALSO
|
||||
|
||||
L<DBI::ProfileDumper|DBI::ProfileDumper>,
|
||||
L<DBI::Profile|DBI::Profile>, L<DBI|DBI>.
|
||||
|
||||
=cut
|
||||
|
||||
SCRIPT
|
||||
|
||||
|
||||
require Config;
|
||||
my $config = {};
|
||||
$config->{'startperl'} = $Config::Config{'startperl'};
|
||||
|
||||
$script =~ s/\~(\w+)\~/$config->{$1}/eg;
|
||||
if (!(open(FILE, ">$file")) ||
|
||||
!(print FILE $script) ||
|
||||
!(close(FILE))) {
|
||||
die "Error while writing $file: $!\n";
|
||||
}
|
||||
chmod 0755, $file;
|
||||
print "Extracted $file from ",__FILE__," with variable substitutions.\n";
|
||||
|
||||
# syntax check resulting file, but only for developers
|
||||
exit 1 if -d ".svn"|| -d ".git" and system($^X, '-wc', '-Mblib', $file) != 0;
|
||||
|
|
@ -0,0 +1,208 @@
|
|||
# -*- perl -*-
|
||||
|
||||
my $file = $ARGV[0] || 'dbiproxy';
|
||||
|
||||
my $script = <<'SCRIPT';
|
||||
~startperl~
|
||||
|
||||
use strict;
|
||||
|
||||
my $VERSION = sprintf("1.%06d", q$Revision$ =~ /(\d+)/o);
|
||||
|
||||
my $arg_test = shift(@ARGV) if $ARGV[0] eq '--test';
|
||||
$ENV{DBI_TRACE} = shift(@ARGV) || 2 if $ARGV[0] =~ s/^--dbitrace=?//;
|
||||
|
||||
require DBI::ProxyServer;
|
||||
|
||||
# XXX these should probably be moved into DBI::ProxyServer
|
||||
delete $ENV{IFS};
|
||||
delete $ENV{CDPATH};
|
||||
delete $ENV{ENV};
|
||||
delete $ENV{BASH_ENV};
|
||||
|
||||
if ($arg_test) {
|
||||
require RPC::PlServer::Test;
|
||||
@DBI::ProxyServer::ISA = qw(RPC::PlServer::Test DBI);
|
||||
}
|
||||
|
||||
DBI::ProxyServer::main(@ARGV);
|
||||
|
||||
exit(0);
|
||||
|
||||
|
||||
__END__
|
||||
|
||||
=head1 NAME
|
||||
|
||||
dbiproxy - A proxy server for the DBD::Proxy driver
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
dbiproxy <options> --localport=<port>
|
||||
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
This tool is just a front end for the DBI::ProxyServer package. All it
|
||||
does is pick options from the command line and call
|
||||
DBI::ProxyServer::main(). See L<DBI::ProxyServer> for details.
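As a rough illustration (host name, port and credentials are placeholders), the server might be started and then used from a client through DBD::Proxy like this:

  dbiproxy --localport=3334 --logfile=STDERR

  # on the client side
  use DBI;
  my $dbh = DBI->connect("dbi:Proxy:hostname=dbhost;port=3334;dsn=dbi:ExampleP:",
                         $user, $password);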
|
||||
|
||||
Available options include:
|
||||
|
||||
=over 4
|
||||
|
||||
=item B<--chroot=dir>
|
||||
|
||||
(UNIX only) After doing a bind(), change root directory to the given
|
||||
directory by doing a chroot(). This is useful for security, but it
|
||||
restricts the environment a lot. For example, you need to load DBI
|
||||
drivers in the config file or you have to create hard links to Unix
|
||||
sockets, if your drivers are using them. For example, with MySQL, a
|
||||
config file might contain the following lines:
|
||||
|
||||
my $rootdir = '/var/dbiproxy';
|
||||
my $unixsockdir = '/tmp';
|
||||
my $unixsockfile = 'mysql.sock';
|
||||
foreach $dir ($rootdir, "$rootdir$unixsockdir") {
|
||||
mkdir 0755, $dir;
|
||||
}
|
||||
link("$unixsockdir/$unixsockfile",
|
||||
"$rootdir$unixsockdir/$unixsockfile");
|
||||
require DBD::mysql;
|
||||
|
||||
{
|
||||
'chroot' => $rootdir,
|
||||
...
|
||||
}
|
||||
|
||||
If you don't know chroot(), think of an FTP server where you can see a
|
||||
certain directory tree only after logging in. See also the --group and
|
||||
--user options.
|
||||
|
||||
=item B<--configfile=file>
|
||||
|
||||
Config files are assumed to return a single hash ref that overrides the
|
||||
arguments of the new method. However, command line arguments in turn take
|
||||
precedence over the config file. See the "CONFIGURATION FILE" section
|
||||
in the L<DBI::ProxyServer> documentation for details on the config file.
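A minimal, purely illustrative config file could therefore look like this (see L<DBI::ProxyServer> for the full set of recognised keys):

  {
      'localport' => '3334',
      'logfile'   => 'STDERR',
      'mode'      => 'single',
  }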
|
||||
|
||||
=item B<--debug>
|
||||
|
||||
Turn debugging mode on. Mainly this asserts that logging messages of
|
||||
level "debug" are created.
|
||||
|
||||
=item B<--facility=mode>
|
||||
|
||||
(UNIX only) Facility to use for L<Sys::Syslog>. The default is
|
||||
B<daemon>.
|
||||
|
||||
=item B<--group=gid>
|
||||
|
||||
After doing a bind(), change the real and effective GID to the given value.
|
||||
This is useful, if you want your server to bind to a privileged port
|
||||
(<1024), but don't want the server to execute as root. See also
|
||||
the --user option.
|
||||
|
||||
GID's can be passed as group names or numeric values.
|
||||
|
||||
=item B<--localaddr=ip>
|
||||
|
||||
By default a daemon is listening to any IP number that a machine
|
||||
has. This attribute allows one to restrict the server to the given
|
||||
IP number.
|
||||
|
||||
=item B<--localport=port>
|
||||
|
||||
This attribute sets the port on which the daemon is listening. It
|
||||
must be given somehow, as there's no default.
|
||||
|
||||
=item B<--logfile=file>
|
||||
|
||||
By default logging messages will be written to the syslog (Unix) or
|
||||
to the event log (Windows NT). On other operating systems you need to
|
||||
specify a log file. The special value "STDERR" forces logging to
|
||||
stderr. See L<Net::Daemon::Log> for details.
|
||||
|
||||
=item B<--mode=modename>
|
||||
|
||||
The server can run in three different modes, depending on the environment.
|
||||
|
||||
If you are running Perl 5.005 and did compile it for threads, then the
|
||||
server will create a new thread for each connection. The thread will
|
||||
execute the server's Run() method and then terminate. This mode is the
|
||||
default, you can force it with "--mode=threads".
|
||||
|
||||
If threads are not available, but you have a working fork(), then the
|
||||
server will behave similar by creating a new process for each connection.
|
||||
This mode will be used automatically in the absence of threads or if
|
||||
you use the "--mode=fork" option.
|
||||
|
||||
Finally there's a single-connection mode: If the server has accepted a
|
||||
connection, it will enter the Run() method. No other connections are
|
||||
accepted until the Run() method returns (if the client disconnects).
|
||||
This operation mode is useful if you have neither threads nor fork(),
|
||||
for example on the Macintosh. For debugging purposes you can force this
|
||||
mode with "--mode=single".
|
||||
|
||||
=item B<--pidfile=file>
|
||||
|
||||
(UNIX only) If this option is present, a PID file will be created at the
|
||||
given location. Default is to not create a pidfile.
|
||||
|
||||
=item B<--user=uid>
|
||||
|
||||
After doing a bind(), change the real and effective UID to the given value.
|
||||
This is useful, if you want your server to bind to a privileged port
|
||||
(<1024), but don't want the server to execute as root. See also
|
||||
the --group and the --chroot options.
|
||||
|
||||
UIDs can be passed as user names or numeric values.
|
||||
|
||||
=item B<--version>
|
||||
|
||||
Suppresses startup of the server; instead the version string will
|
||||
be printed and the program exits immediately.
|
||||
|
||||
=back
|
||||
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Copyright (c) 1997 Jochen Wiedmann
|
||||
Am Eisteich 9
|
||||
72555 Metzingen
|
||||
Germany
|
||||
|
||||
Email: joe@ispsoft.de
|
||||
Phone: +49 7123 14881
|
||||
|
||||
The DBI::ProxyServer module is free software; you can redistribute it
|
||||
and/or modify it under the same terms as Perl itself. In particular
|
||||
permission is granted to Tim Bunce for distributing this as a part of
|
||||
the DBI.
|
||||
|
||||
|
||||
=head1 SEE ALSO
|
||||
|
||||
L<DBI::ProxyServer>, L<DBD::Proxy>, L<DBI>
|
||||
|
||||
=cut
|
||||
SCRIPT
|
||||
|
||||
|
||||
require Config;
|
||||
my $config = {};
|
||||
$config->{'startperl'} = $Config::Config{'startperl'};
|
||||
|
||||
$script =~ s/\~(\w+)\~/$config->{$1}/eg;
|
||||
if (!(open(FILE, ">$file")) ||
|
||||
!(print FILE $script) ||
|
||||
!(close(FILE))) {
|
||||
die "Error while writing $file: $!\n";
|
||||
}
|
||||
chmod 0755, $file;
|
||||
print "Extracted $file from ",__FILE__," with variable substitutions.\n";
|
||||
|
||||
# syntax check resulting file, but only for developers
|
||||
exit 1 if -d ".svn" || -d ".git" and system($^X, '-wc', '-Mblib', $file) != 0;
|
||||
|
|
@ -0,0 +1,52 @@
/* dbivport.h

   Provides macros that enable greater portability between DBI versions.

   This file should be *copied* and included in driver distributions
   and #included into the source, after #include DBIXS.h

   New driver releases should include an updated copy of dbivport.h
   from the most recent DBI release.
*/

#ifndef DBI_VPORT_H
#define DBI_VPORT_H

#ifndef DBIh_SET_ERR_CHAR
/* Emulate DBIh_SET_ERR_CHAR
   Only uses the err_i, errstr and state parameters.
*/
#define DBIh_SET_ERR_CHAR(h, imp_xxh, err_c, err_i, errstr, state, method) \
        sv_setiv(DBIc_ERR(imp_xxh), err_i); \
        (state) ? (void)sv_setpv(DBIc_STATE(imp_xxh), state) : (void)SvOK_off(DBIc_STATE(imp_xxh)); \
        sv_setpv(DBIc_ERRSTR(imp_xxh), errstr)
#endif

#ifndef DBIcf_Executed
#define DBIcf_Executed    0x080000
#endif

#ifndef DBIc_TRACE_LEVEL_MASK
#define DBIc_TRACE_LEVEL_MASK   0x0000000F
#define DBIc_TRACE_FLAGS_MASK   0xFFFFFF00
#define DBIc_TRACE_SETTINGS(imp) (DBIc_DBISTATE(imp)->debug)
#define DBIc_TRACE_LEVEL(imp)   (DBIc_TRACE_SETTINGS(imp) & DBIc_TRACE_LEVEL_MASK)
#define DBIc_TRACE_FLAGS(imp)   (DBIc_TRACE_SETTINGS(imp) & DBIc_TRACE_FLAGS_MASK)
/* DBIc_TRACE_MATCHES - true if s1 'matches' s2 (c.f. trace_msg())
   DBIc_TRACE_MATCHES(foo, DBIc_TRACE_SETTINGS(imp))
*/
#define DBIc_TRACE_MATCHES(s1, s2)      \
        (  ((s1 & DBIc_TRACE_LEVEL_MASK) >= (s2 & DBIc_TRACE_LEVEL_MASK)) \
        || ((s1 & DBIc_TRACE_FLAGS_MASK)  & (s2 & DBIc_TRACE_FLAGS_MASK)) )
/* DBIc_TRACE - true if flags match & DBI level>=flaglevel, or if DBI level>level
   DBIc_TRACE(imp, 0, 0, 4) = if level >= 4
   DBIc_TRACE(imp, DBDtf_FOO, 2, 4) = if tracing DBDtf_FOO & level>=2 or level>=4
   DBIc_TRACE(imp, DBDtf_FOO, 2, 0) = as above but never trace just due to level
*/
#define DBIc_TRACE(imp, flags, flaglevel, level)        \
        (  (flags && (DBIc_TRACE_FLAGS(imp) & flags) && (DBIc_TRACE_LEVEL(imp) >= flaglevel)) \
        || (level && DBIc_TRACE_LEVEL(imp) >= level) )
#endif


#endif /* !DBI_VPORT_H */
@ -0,0 +1,3 @@
/* Fri Jul 13 13:32:02 2012 */
/* Mixed revision working copy (15349:15353) */
#define DBIXS_REVISION 15349
@ -0,0 +1,51 @@
#!perl -w
use strict;

my $dbixs_rev_file = "dbixs_rev.h";

my $is_make_dist;
my $svnversion;

if (is_dbi_svn_dir(".")) {
    $svnversion = `svnversion -n`;
}
elsif (is_dbi_svn_dir("..")) {
    # presumably we're in a subdirectory because the user is doing a 'make dist'
    $svnversion = `svnversion -n ..`;
    $is_make_dist = 1;
}
else {
    # presumably we're being run by an end-user because their file timestamps
    # got messed up
    print "Skipping regeneration of $dbixs_rev_file\n";
    utime(time(), time(), $dbixs_rev_file); # update modification time
    exit 0;
}

my @warn;
die "Neither current directory nor parent directory are an svn working copy\n"
    unless $svnversion and $svnversion =~ m/^\d+/;
push @warn, "Mixed revision working copy ($svnversion:$1)"
    if $svnversion =~ s/:(\d+)//;
push @warn, "Code modified since last checkin"
    if $svnversion =~ s/[MS]+$//;
warn "$dbixs_rev_file warning: $_\n" for @warn;
die "$0 failed\n" if $is_make_dist && @warn;

write_header($dbixs_rev_file, DBIXS_REVISION => $svnversion, \@warn);

sub write_header {
    my ($file, $macro, $version, $comments_ref) = @_;
    open my $fh, ">$file" or die "Can't open $file: $!\n";
    unshift @$comments_ref, scalar localtime(time);
    print $fh "/* $_ */\n" for @$comments_ref;
    print $fh "#define $macro $version\n";
    close $fh or die "Error closing $file: $!\n";
    print "Wrote $macro $version to $file\n";
}

sub is_dbi_svn_dir {
    my ($dir) = @_;
    return (-d "$dir/.svn" && -f "$dir/MANIFEST.SKIP");
}
@ -0,0 +1,32 @@
#!perl

use strict;
use warnings;
use Time::HiRes qw(time);

BEGIN { $ENV{PERL_ANYEVENT_STRICT} = 1; $ENV{PERL_ANYEVENT_VERBOSE} = 1; }

use AnyEvent;

BEGIN { $ENV{DBI_TRACE} = 0; $ENV{DBI_PUREPERL} = 0; $ENV{DBI_GOFER_TRACE} = 0; $ENV{DBD_GOFER_TRACE} = 0; };

use DBI;

$ENV{DBI_AUTOPROXY} = 'dbi:Gofer:transport=corostream';

my $ticker = AnyEvent->timer( after => 0, interval => 0.1, cb => sub {
    warn sprintf "-tick- %.2f\n", time
} );

warn "connecting...\n";
my $dbh = DBI->connect("dbi:NullP:");
warn "...connected\n";

for (1..5) {
    warn "entering DBI...\n";
    $dbh->do("sleep 0.3"); # pseudo-sql understood by the DBD::NullP driver
    warn "...returned\n";
}

warn "done.";
@ -0,0 +1,176 @@
|
|||
#! /usr/bin/perl -w
|
||||
|
||||
# This script checks which style of WHERE clause(s) will support both
|
||||
# null and non-null values. Refer to the NULL Values sub-section
|
||||
# of the "Placeholders and Bind Values" section in the DBI
|
||||
# documentation for more information on this issue. The clause styles
|
||||
# and their numbering (0-6) map directly to the examples in the
|
||||
# documentation.
|
||||
#
|
||||
# To use this script:
|
||||
#
|
||||
# 1) If you are not using the DBI_DSN env variable, then update the
|
||||
# connect method arguments to support your database engine and
|
||||
# database, and remove the nearby check for DBI_DSN.
|
||||
# 2) Set PrintError to 1 in the connect method if you want see the
|
||||
# engine's reason WHY your engine won't support a particular
|
||||
# style.
|
||||
# 3) If your database does not support NULL columns by default
|
||||
# (e.g. Sybase) find and edit the CREATE TABLE statement
|
||||
# accordingly.
|
||||
# 4) To properly test style #5, you need the capability to create the
|
||||
# stored procedure SP_ISNULL that acts as a function: it tests its
|
||||
# argument and returns 1 if it is null, 0 otherwise. For example,
|
||||
# using Informix IDS engine, a definition would look like:
|
||||
#
|
||||
# CREATE PROCEDURE SP_ISNULL (arg VARCHAR(32)) RETURNING INTEGER;
|
||||
# IF arg IS NULL THEN RETURN 1;
|
||||
# ELSE RETURN 0;
|
||||
# END IF;
|
||||
# END PROCEDURE;
|
||||
#
|
||||
# Warning: This script will attempt to create a table named by the
|
||||
# $tablename variable (default dbi__null_test_tmp) and WILL DESTROY
|
||||
# any pre-existing table so named.
|
||||
|
||||
use strict;
|
||||
use DBI;
|
||||
|
||||
# The array represents the values that will be stored in the char column of our table.
|
||||
# One array element per row.
|
||||
# We expect the non-null test to return row 3 (Marge)
|
||||
# and the null test to return rows 2 and 4 (the undefs).
|
||||
|
||||
my $homer = "Homer";
|
||||
my $marge = "Marge";
|
||||
|
||||
my @char_column_values = (
|
||||
$homer, # 1
|
||||
undef, # 2
|
||||
$marge, # 3
|
||||
undef, # 4
|
||||
);
|
||||
|
||||
# Define the SQL statements with the various WHERE clause styles we want to test
|
||||
# and the parameters we'll substitute.
|
||||
|
||||
my @select_clauses =
|
||||
(
|
||||
{clause=>qq{WHERE mycol = ?}, nonnull=>[$marge], null=>[undef]},
|
||||
{clause=>qq{WHERE NVL(mycol, '-') = NVL(?, '-')}, nonnull=>[$marge], null=>[undef]},
|
||||
{clause=>qq{WHERE ISNULL(mycol, '-') = ISNULL(?, '-')}, nonnull=>[$marge], null=>[undef]},
|
||||
{clause=>qq{WHERE DECODE(mycol, ?, 1, 0) = 1}, nonnull=>[$marge], null=>[undef]},
|
||||
{clause=>qq{WHERE mycol = ? OR (mycol IS NULL AND ? IS NULL)}, nonnull=>[$marge,$marge], null=>[undef,undef]},
|
||||
{clause=>qq{WHERE mycol = ? OR (mycol IS NULL AND SP_ISNULL(?) = 1)}, nonnull=>[$marge,$marge], null=>[undef,undef]},
|
||||
{clause=>qq{WHERE mycol = ? OR (mycol IS NULL AND ? = 1)}, nonnull=>[$marge,0], null=>[undef,1]},
|
||||
);
|
||||
|
||||
# This is the table we'll create and use for these tests.
|
||||
# If it exists, we'll DESTROY it too. So the name must be obscure.
|
||||
|
||||
my $tablename = "dbi__null_test_tmp";
|
||||
|
||||
# Remove this if you are not using the DBI_DSN env variable,
|
||||
# and update the connect statement below.
|
||||
|
||||
die "DBI_DSN environment variable not defined"
|
||||
unless $ENV{DBI_DSN};
|
||||
|
||||
my $dbh = DBI->connect(undef, undef, undef,
|
||||
{
|
||||
RaiseError => 0,
|
||||
PrintError => 1
|
||||
}
|
||||
) || die DBI->errstr;
|
||||
|
||||
printf "Using %s, db version: %s\n", $ENV{DBI_DSN} || "connect arguments", $dbh->get_info(18) || "(unknown)";
|
||||
|
||||
my $sth;
|
||||
my @ok;
|
||||
|
||||
print "=> Drop table '$tablename', if it already exists...\n";
|
||||
do { local $dbh->{PrintError}=0; $dbh->do("DROP TABLE $tablename"); };
|
||||
|
||||
print "=> Create table '$tablename'...\n";
|
||||
$dbh->do("CREATE TABLE $tablename (myid int NOT NULL, mycol char(5))");
|
||||
# Use this if your database does not support NULL columns by default:
|
||||
#$dbh->do("CREATE TABLE $tablename (myid int NOT NULL, mycol char(5) NULL)");
|
||||
|
||||
print "=> Insert 4 rows into the table...\n";
|
||||
|
||||
$sth = $dbh->prepare("INSERT INTO $tablename (myid, mycol) VALUES (?,?)");
|
||||
for my $i (0..$#char_column_values)
|
||||
{
|
||||
my $val = $char_column_values[$i];
|
||||
printf " Inserting values (%d, %s)\n", $i+1, $dbh->quote($val);
|
||||
$sth->execute($i+1, $val);
|
||||
}
|
||||
print "(Driver bug: statement handle should not be Active after an INSERT.)\n"
|
||||
if $sth->{Active};
|
||||
|
||||
# Run the tests...
|
||||
|
||||
for my $i (0..$#select_clauses)
|
||||
{
|
||||
my $sel = $select_clauses[$i];
|
||||
print "\n=> Testing clause style $i: ".$sel->{clause}."...\n";
|
||||
|
||||
$sth = $dbh->prepare("SELECT myid,mycol FROM $tablename ".$sel->{clause})
|
||||
or next;
|
||||
|
||||
print " Selecting row with $marge\n";
|
||||
$sth->execute(@{$sel->{nonnull}})
|
||||
or next;
|
||||
my $r1 = $sth->fetchall_arrayref();
|
||||
my $n1_rows = $sth->rows;
|
||||
my $n1 = @$r1;
|
||||
|
||||
print " Selecting rows with NULL\n";
|
||||
$sth->execute(@{$sel->{null}})
|
||||
or next;
|
||||
my $r2 = $sth->fetchall_arrayref();
|
||||
my $n2_rows = $sth->rows;
|
||||
my $n2 = @$r2;
|
||||
|
||||
# Complain a bit...
|
||||
|
||||
print "\n=>Your DBD driver doesn't support the 'rows' method very well.\n\n"
|
||||
unless ($n1_rows == $n1 && $n2_rows == $n2);
|
||||
|
||||
# Did we get back the expected "n"umber of rows?
|
||||
# Did we get back the specific "r"ows we expected as identified by the myid column?
|
||||
|
||||
if ( $n1 == 1 # one row for Marge
|
||||
&& $n2 == 2 # two rows for nulls
|
||||
&& $r1->[0][0] == 3 # Marge is myid 3
|
||||
&& $r2->[0][0] == 2 # NULL for myid 2
|
||||
&& $r2->[1][0] == 4 # NULL for myid 4
|
||||
) {
|
||||
print "=> WHERE clause style $i is supported.\n";
|
||||
push @ok, "\tStyle $i: ".$sel->{clause};
|
||||
}
|
||||
else
|
||||
{
|
||||
print "=> WHERE clause style $i returned incorrect results.\n";
|
||||
if ($n1 > 0 || $n2 > 0)
|
||||
{
|
||||
print " Non-NULL test rows returned these row ids: ".
|
||||
join(", ", map { $r1->[$_][0] } (0..$#{$r1}))."\n";
|
||||
print " The NULL test rows returned these row ids: ".
|
||||
join(", ", map { $r2->[$_][0] } (0..$#{$r2}))."\n";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
$dbh->disconnect();
|
||||
print "\n";
|
||||
print "-" x 72, "\n";
|
||||
printf "%d styles are supported:\n", scalar @ok;
|
||||
print "$_\n" for @ok;
|
||||
print "-" x 72, "\n";
|
||||
print "\n";
|
||||
print "If these results don't match what's in the 'Placeholders and Bind Values'\n";
|
||||
print "section of the DBI documentation, or are for a database that not already\n";
|
||||
print "listed, please email the results to dbi-users\@perl.org. Thank you.\n";
|
||||
|
||||
exit 0;
|
|
@ -0,0 +1,25 @@
#!/usr/bin/perl -w

use DBI;

$dbh = DBI->connect('dbi:SQLite:dbname=ex_profile.db', '', '', { RaiseError => 1 });

$dbh->do("DROP TABLE IF EXISTS ex_profile");
$dbh->do("CREATE TABLE ex_profile (a int)");

$dbh->do("INSERT INTO ex_profile (a) VALUES ($_)", undef) for 1..100;
#$dbh->do("INSERT INTO ex_profile (a) VALUES (?)", undef, $_) for 1..100;

my $select_sql = "SELECT a FROM ex_profile";

$dbh->selectall_arrayref($select_sql);

$dbh->selectall_hashref($select_sql, 'a');

my $sth = $dbh->prepare($select_sql);
$sth->execute;
while ( @row = $sth->fetchrow_array ) {
}


__DATA__
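This example is intended to be run with DBI profiling enabled through the DBI_PROFILE environment variable; the collected profile is reported when the handles are destroyed. One possible invocation (the Path spec shown is just one option):

  DBI_PROFILE='!Statement' perl ex_profile.pl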
@ -0,0 +1,52 @@
# -*- perl -*-

package Bundle::DBI;

use strict;
our $VERSION = "12.008696";

1;

__END__

=head1 NAME

Bundle::DBI - A bundle to install DBI and required modules.

=head1 SYNOPSIS

  perl -MCPAN -e 'install Bundle::DBI'

=head1 CONTENTS

DBI - for to get to know thyself

DBI::Shell 11.91 - the DBI command line shell

Storable 2.06 - for DBD::Proxy, DBI::ProxyServer, DBD::Forward

Net::Daemon 0.37 - for DBD::Proxy and DBI::ProxyServer

RPC::PlServer 0.2016 - for DBD::Proxy and DBI::ProxyServer

DBD::Multiplex 1.19 - treat multiple db handles as one

=head1 DESCRIPTION

This bundle includes all the modules used by the Perl Database
Interface (DBI) module, created by Tim Bunce.

A I<Bundle> is a module that simply defines a collection of other
modules. It is used by the L<CPAN> module to automate the fetching,
building and installing of modules from the CPAN ftp archive sites.

This bundle does not deal with the various database drivers (e.g.
DBD::Informix, DBD::Oracle etc), most of which require software from
sources other than CPAN. You'll need to fetch and build those drivers
yourself.

=head1 AUTHORS

Jonathan Leffler, Jochen Wiedmann and Tim Bunce.

=cut
File diff suppressed because it is too large
@ -0,0 +1,435 @@
|
|||
{
|
||||
package DBD::ExampleP;
|
||||
|
||||
use strict;
|
||||
use Symbol;
|
||||
|
||||
use DBI qw(:sql_types);
|
||||
|
||||
require File::Spec;
|
||||
|
||||
our (@EXPORT,$VERSION,@statnames,%statnames,@stattypes,%stattypes,
|
||||
@statprec,%statprec,$drh,);
|
||||
|
||||
@EXPORT = qw(); # Do NOT @EXPORT anything.
|
||||
$VERSION = "12.014311";
|
||||
|
||||
# $Id: ExampleP.pm 14310 2010-08-02 06:35:25Z Jens $
|
||||
#
|
||||
# Copyright (c) 1994,1997,1998 Tim Bunce
|
||||
#
|
||||
# You may distribute under the terms of either the GNU General Public
|
||||
# License or the Artistic License, as specified in the Perl README file.
|
||||
|
||||
@statnames = qw(dev ino mode nlink
|
||||
uid gid rdev size
|
||||
atime mtime ctime
|
||||
blksize blocks name);
|
||||
@statnames{@statnames} = (0 .. @statnames-1);
|
||||
|
||||
@stattypes = (SQL_INTEGER, SQL_INTEGER, SQL_INTEGER, SQL_INTEGER,
|
||||
SQL_INTEGER, SQL_INTEGER, SQL_INTEGER, SQL_INTEGER,
|
||||
SQL_INTEGER, SQL_INTEGER, SQL_INTEGER,
|
||||
SQL_INTEGER, SQL_INTEGER, SQL_VARCHAR);
|
||||
@stattypes{@statnames} = @stattypes;
|
||||
@statprec = ((10) x (@statnames-1), 1024);
|
||||
@statprec{@statnames} = @statprec;
|
||||
die unless @statnames == @stattypes;
|
||||
die unless @statprec == @stattypes;
|
||||
|
||||
$drh = undef; # holds driver handle once initialised
|
||||
#$gensym = "SYM000"; # used by st::execute() for filehandles
|
||||
|
||||
sub driver{
|
||||
return $drh if $drh;
|
||||
my($class, $attr) = @_;
|
||||
$class .= "::dr";
|
||||
($drh) = DBI::_new_drh($class, {
|
||||
'Name' => 'ExampleP',
|
||||
'Version' => $VERSION,
|
||||
'Attribution' => 'DBD Example Perl stub by Tim Bunce',
|
||||
}, ['example implementors private data '.__PACKAGE__]);
|
||||
$drh;
|
||||
}
|
||||
|
||||
sub CLONE {
|
||||
undef $drh;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
{ package DBD::ExampleP::dr; # ====== DRIVER ======
|
||||
$imp_data_size = 0;
|
||||
use strict;
|
||||
|
||||
sub connect { # normally overridden, but a handy default
|
||||
my($drh, $dbname, $user, $auth)= @_;
|
||||
my ($outer, $dbh) = DBI::_new_dbh($drh, {
|
||||
Name => $dbname,
|
||||
examplep_private_dbh_attrib => 42, # an example, for testing
|
||||
});
|
||||
$dbh->{examplep_get_info} = {
|
||||
29 => '"', # SQL_IDENTIFIER_QUOTE_CHAR
|
||||
41 => '.', # SQL_CATALOG_NAME_SEPARATOR
|
||||
114 => 1, # SQL_CATALOG_LOCATION
|
||||
};
|
||||
#$dbh->{Name} = $dbname;
|
||||
$dbh->STORE('Active', 1);
|
||||
return $outer;
|
||||
}
|
||||
|
||||
sub data_sources {
|
||||
return ("dbi:ExampleP:dir=."); # possibly usefully meaningless
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
{ package DBD::ExampleP::db; # ====== DATABASE ======
|
||||
$imp_data_size = 0;
|
||||
use strict;
|
||||
|
||||
sub prepare {
|
||||
my($dbh, $statement)= @_;
|
||||
my @fields;
|
||||
my($fields, $dir) = $statement =~ m/^\s*select\s+(.*?)\s+from\s+(\S*)/i;
|
||||
|
||||
if (defined $fields and defined $dir) {
|
||||
@fields = ($fields eq '*')
|
||||
? keys %DBD::ExampleP::statnames
|
||||
: split(/\s*,\s*/, $fields);
|
||||
}
|
||||
else {
|
||||
return $dbh->set_err($DBI::stderr, "Syntax error in select statement (\"$statement\")")
|
||||
unless $statement =~ m/^\s*set\s+/;
|
||||
# the SET syntax is just a hack so the ExampleP driver can
|
||||
# be used to test non-select statements.
|
||||
# Now we have DBI::DBM etc., ExampleP should be deprecated
|
||||
}
|
||||
|
||||
my ($outer, $sth) = DBI::_new_sth($dbh, {
|
||||
'Statement' => $statement,
|
||||
examplep_private_sth_attrib => 24, # an example, for testing
|
||||
}, ['example implementors private data '.__PACKAGE__]);
|
||||
|
||||
my @bad = map {
|
||||
defined $DBD::ExampleP::statnames{$_} ? () : $_
|
||||
} @fields;
|
||||
return $dbh->set_err($DBI::stderr, "Unknown field names: @bad")
|
||||
if @bad;
|
||||
|
||||
$outer->STORE('NUM_OF_FIELDS' => scalar(@fields));
|
||||
|
||||
$sth->{examplep_ex_dir} = $dir if defined($dir) && $dir !~ /\?/;
|
||||
$outer->STORE('NUM_OF_PARAMS' => ($dir) ? $dir =~ tr/?/?/ : 0);
|
||||
|
||||
if (@fields) {
|
||||
$outer->STORE('NAME' => \@fields);
|
||||
$outer->STORE('NULLABLE' => [ (0) x @fields ]);
|
||||
$outer->STORE('SCALE' => [ (0) x @fields ]);
|
||||
}
|
||||
|
||||
$outer;
|
||||
}
|
||||
|
||||
|
||||
sub table_info {
|
||||
my $dbh = shift;
|
||||
my ($catalog, $schema, $table, $type) = @_;
|
||||
|
||||
my @types = split(/["']*,["']/, $type || 'TABLE');
|
||||
my %types = map { $_=>$_ } @types;
|
||||
|
||||
# Return a list of all subdirectories
|
||||
my $dh = Symbol::gensym(); # "DBD::ExampleP::".++$DBD::ExampleP::gensym;
|
||||
my $dir = $catalog || File::Spec->curdir();
|
||||
my @list;
|
||||
if ($types{VIEW}) { # for use by test harness
|
||||
push @list, [ undef, "schema", "table", 'VIEW', undef ];
|
||||
push @list, [ undef, "sch-ema", "table", 'VIEW', undef ];
|
||||
push @list, [ undef, "schema", "ta-ble", 'VIEW', undef ];
|
||||
push @list, [ undef, "sch ema", "table", 'VIEW', undef ];
|
||||
push @list, [ undef, "schema", "ta ble", 'VIEW', undef ];
|
||||
}
|
||||
if ($types{TABLE}) {
|
||||
no strict 'refs';
|
||||
opendir($dh, $dir)
|
||||
or return $dbh->set_err(int($!), "Failed to open directory $dir: $!");
|
||||
while (defined(my $item = readdir($dh))) {
|
||||
if ($^O eq 'VMS') {
|
||||
# if on VMS then avoid warnings from catdir if you use a file
|
||||
# (not a dir) as the item below
|
||||
next if $item !~ /\.dir$/oi;
|
||||
}
|
||||
my $file = File::Spec->catdir($dir,$item);
|
||||
next unless -d $file;
|
||||
my($dev, $ino, $mode, $nlink, $uid) = lstat($file);
|
||||
my $pwnam = undef; # eval { scalar(getpwnam($uid)) } || $uid;
|
||||
push @list, [ $dir, $pwnam, $item, 'TABLE', undef ];
|
||||
}
|
||||
closedir($dh);
|
||||
}
|
||||
# We would like to simply do a DBI->connect() here. However,
|
||||
# this is wrong if we are in a subclass like DBI::ProxyServer.
|
||||
$dbh->{'dbd_sponge_dbh'} ||= DBI->connect("DBI:Sponge:", '','')
|
||||
or return $dbh->set_err($DBI::err,
|
||||
"Failed to connect to DBI::Sponge: $DBI::errstr");
|
||||
|
||||
my $attr = {
|
||||
'rows' => \@list,
|
||||
'NUM_OF_FIELDS' => 5,
|
||||
'NAME' => ['TABLE_CAT', 'TABLE_SCHEM', 'TABLE_NAME',
|
||||
'TABLE_TYPE', 'REMARKS'],
|
||||
'TYPE' => [DBI::SQL_VARCHAR(), DBI::SQL_VARCHAR(),
|
||||
DBI::SQL_VARCHAR(), DBI::SQL_VARCHAR(), DBI::SQL_VARCHAR() ],
|
||||
'NULLABLE' => [1, 1, 1, 1, 1]
|
||||
};
|
||||
my $sdbh = $dbh->{'dbd_sponge_dbh'};
|
||||
my $sth = $sdbh->prepare("SHOW TABLES FROM $dir", $attr)
|
||||
or return $dbh->set_err($sdbh->err(), $sdbh->errstr());
|
||||
$sth;
|
||||
}
|
||||
|
||||
|
||||
sub type_info_all {
|
||||
my ($dbh) = @_;
|
||||
my $ti = [
|
||||
{ TYPE_NAME => 0,
|
||||
DATA_TYPE => 1,
|
||||
COLUMN_SIZE => 2,
|
||||
LITERAL_PREFIX => 3,
|
||||
LITERAL_SUFFIX => 4,
|
||||
CREATE_PARAMS => 5,
|
||||
NULLABLE => 6,
|
||||
CASE_SENSITIVE => 7,
|
||||
SEARCHABLE => 8,
|
||||
UNSIGNED_ATTRIBUTE=> 9,
|
||||
FIXED_PREC_SCALE=> 10,
|
||||
AUTO_UNIQUE_VALUE => 11,
|
||||
LOCAL_TYPE_NAME => 12,
|
||||
MINIMUM_SCALE => 13,
|
||||
MAXIMUM_SCALE => 14,
|
||||
},
|
||||
[ 'VARCHAR', DBI::SQL_VARCHAR, 1024, "'","'", undef, 0, 1, 1, 0, 0,0,undef,0,0 ],
|
||||
[ 'INTEGER', DBI::SQL_INTEGER, 10, "","", undef, 0, 0, 1, 0, 0,0,undef,0,0 ],
|
||||
];
|
||||
return $ti;
|
||||
}
|
||||
|
||||
|
||||
sub ping {
|
||||
(shift->FETCH('Active')) ? 2 : 0; # the value 2 is checked for by t/80proxy.t
|
||||
}
|
||||
|
||||
|
||||
sub disconnect {
|
||||
shift->STORE(Active => 0);
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
||||
sub get_info {
|
||||
my ($dbh, $info_type) = @_;
|
||||
return $dbh->{examplep_get_info}->{$info_type};
|
||||
}
|
||||
|
||||
|
||||
sub FETCH {
|
||||
my ($dbh, $attrib) = @_;
|
||||
# In reality this would interrogate the database engine to
|
||||
# either return dynamic values that cannot be precomputed
|
||||
# or fetch and cache attribute values too expensive to prefetch.
|
||||
# else pass up to DBI to handle
|
||||
return $INC{"DBD/ExampleP.pm"} if $attrib eq 'example_driver_path';
|
||||
return $dbh->SUPER::FETCH($attrib);
|
||||
}
|
||||
|
||||
|
||||
sub STORE {
|
||||
my ($dbh, $attrib, $value) = @_;
|
||||
# store only known attributes else pass up to DBI to handle
|
||||
if ($attrib eq 'examplep_set_err') {
|
||||
# a fake attribute to enable a test case where STORE issues a warning
|
||||
$dbh->set_err($value, $value);
|
||||
return;
|
||||
}
|
||||
if ($attrib eq 'AutoCommit') {
|
||||
# convert AutoCommit values to magic ones to let DBI
|
||||
# know that the driver has 'handled' the AutoCommit attribute
|
||||
$value = ($value) ? -901 : -900;
|
||||
}
|
||||
return $dbh->{$attrib} = $value if $attrib =~ /^examplep_/;
|
||||
return $dbh->SUPER::STORE($attrib, $value);
|
||||
}
|
||||
|
||||
sub DESTROY {
|
||||
my $dbh = shift;
|
||||
$dbh->disconnect if $dbh->FETCH('Active');
|
||||
undef
|
||||
}
|
||||
|
||||
|
||||
# This is an example to demonstrate the use of driver-specific
|
||||
# methods via $dbh->func().
|
||||
# Use it as follows:
|
||||
# my @tables = $dbh->func($re, 'examplep_tables');
|
||||
#
|
||||
# Returns all the tables that match the regular expression $re.
|
||||
sub examplep_tables {
|
||||
my $dbh = shift; my $re = shift;
|
||||
grep { $_ =~ /$re/ } $dbh->tables();
|
||||
}
|
||||
|
||||
sub parse_trace_flag {
|
||||
my ($h, $name) = @_;
|
||||
return 0x01000000 if $name eq 'foo';
|
||||
return 0x02000000 if $name eq 'bar';
|
||||
return 0x04000000 if $name eq 'baz';
|
||||
return 0x08000000 if $name eq 'boo';
|
||||
return 0x10000000 if $name eq 'bop';
|
||||
return $h->SUPER::parse_trace_flag($name);
|
||||
}
|
||||
|
||||
sub private_attribute_info {
|
||||
return { example_driver_path => undef };
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
{ package DBD::ExampleP::st; # ====== STATEMENT ======
|
||||
$imp_data_size = 0;
|
||||
use strict; no strict 'refs'; # strict refs would cause problems with the filehandles used here
|
||||
|
||||
sub bind_param {
|
||||
my($sth, $param, $value, $attribs) = @_;
|
||||
$sth->{'dbd_param'}->[$param-1] = $value;
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
||||
sub execute {
|
||||
my($sth, @dir) = @_;
|
||||
my $dir;
|
||||
|
||||
if (@dir) {
|
||||
$sth->bind_param($_, $dir[$_-1]) or return
|
||||
foreach (1..@dir);
|
||||
}
|
||||
|
||||
my $dbd_param = $sth->{'dbd_param'} || [];
|
||||
return $sth->set_err(2, @$dbd_param." values bound when $sth->{NUM_OF_PARAMS} expected")
|
||||
unless @$dbd_param == $sth->{NUM_OF_PARAMS};
|
||||
|
||||
return 0 unless $sth->{NUM_OF_FIELDS}; # not a select
|
||||
|
||||
$dir = $dbd_param->[0] || $sth->{examplep_ex_dir};
|
||||
return $sth->set_err(2, "No bind parameter supplied")
|
||||
unless defined $dir;
|
||||
|
||||
$sth->finish;
|
||||
|
||||
#
|
||||
# If the users asks for directory "long_list_4532", then we fake a
|
||||
# directory with files "file4351", "file4350", ..., "file0".
|
||||
# This is a special case used for testing, especially DBD::Proxy.
|
||||
#
|
||||
if ($dir =~ /^long_list_(\d+)$/) {
|
||||
$sth->{dbd_dir} = [ $1 ]; # array ref indicates special mode
|
||||
$sth->{dbd_datahandle} = undef;
|
||||
}
|
||||
else {
|
||||
$sth->{dbd_dir} = $dir;
|
||||
my $sym = Symbol::gensym(); # "DBD::ExampleP::".++$DBD::ExampleP::gensym;
|
||||
opendir($sym, $dir)
|
||||
or return $sth->set_err(2, "opendir($dir): $!");
|
||||
$sth->{dbd_datahandle} = $sym;
|
||||
}
|
||||
$sth->STORE(Active => 1);
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
||||
sub fetch {
|
||||
my $sth = shift;
|
||||
my $dir = $sth->{dbd_dir};
|
||||
my %s;
|
||||
|
||||
if (ref $dir) { # special fake-data test mode
|
||||
my $num = $dir->[0]--;
|
||||
unless ($num > 0) {
|
||||
$sth->finish();
|
||||
return;
|
||||
}
|
||||
my $time = time;
|
||||
@s{@DBD::ExampleP::statnames} =
|
||||
( 2051, 1000+$num, 0644, 2, $>, $), 0, 1024,
|
||||
$time, $time, $time, 512, 2, "file$num")
|
||||
}
|
||||
else { # normal mode
|
||||
my $dh = $sth->{dbd_datahandle}
|
||||
or return $sth->set_err($DBI::stderr, "fetch without successful execute");
|
||||
my $f = readdir($dh);
|
||||
unless ($f) {
|
||||
$sth->finish;
|
||||
return;
|
||||
}
|
||||
# untaint $f so that we can use this for DBI taint tests
|
||||
($f) = ($f =~ m/^(.*)$/);
|
||||
my $file = File::Spec->catfile($dir, $f);
|
||||
# put in all the data fields
|
||||
@s{ @DBD::ExampleP::statnames } = (lstat($file), $f);
|
||||
}
|
||||
|
||||
# return just what fields the query asks for
|
||||
my @new = @s{ @{$sth->{NAME}} };
|
||||
|
||||
return $sth->_set_fbav(\@new);
|
||||
}
|
||||
*fetchrow_arrayref = \&fetch;
|
||||
|
||||
|
||||
sub finish {
|
||||
my $sth = shift;
|
||||
closedir($sth->{dbd_datahandle}) if $sth->{dbd_datahandle};
|
||||
$sth->{dbd_datahandle} = undef;
|
||||
$sth->{dbd_dir} = undef;
|
||||
$sth->SUPER::finish();
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
||||
sub FETCH {
|
||||
my ($sth, $attrib) = @_;
|
||||
# In reality this would interrogate the database engine to
|
||||
# either return dynamic values that cannot be precomputed
|
||||
# or fetch and cache attribute values too expensive to prefetch.
|
||||
if ($attrib eq 'TYPE'){
|
||||
return [ @DBD::ExampleP::stattypes{ @{ $sth->FETCH(q{NAME_lc}) } } ];
|
||||
}
|
||||
elsif ($attrib eq 'PRECISION'){
|
||||
return [ @DBD::ExampleP::statprec{ @{ $sth->FETCH(q{NAME_lc}) } } ];
|
||||
}
|
||||
elsif ($attrib eq 'ParamValues') {
|
||||
my $dbd_param = $sth->{dbd_param} || [];
|
||||
my %pv = map { $_ => $dbd_param->[$_-1] } 1..@$dbd_param;
|
||||
return \%pv;
|
||||
}
|
||||
# else pass up to DBI to handle
|
||||
return $sth->SUPER::FETCH($attrib);
|
||||
}
|
||||
|
||||
|
||||
sub STORE {
|
||||
my ($sth, $attrib, $value) = @_;
|
||||
# would normally validate and only store known attributes
|
||||
# else pass up to DBI to handle
|
||||
return $sth->{$attrib} = $value
|
||||
if $attrib eq 'NAME' or $attrib eq 'NULLABLE' or $attrib eq 'SCALE' or $attrib eq 'PRECISION';
|
||||
return $sth->SUPER::STORE($attrib, $value);
|
||||
}
|
||||
|
||||
*parse_trace_flag = \&DBD::ExampleP::db::parse_trace_flag;
|
||||
}
|
||||
|
||||
1;
|
||||
# vim: sw=4:ts=8
|
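For reference, a minimal session with this driver: it presents the C<stat()> fields of the files in a directory as a table, with the directory supplied as the single placeholder (a sketch, error handling omitted):

    use DBI;

    my $dbh = DBI->connect("dbi:ExampleP:", "", "", { RaiseError => 1 });
    my $sth = $dbh->prepare("select name, size, mode from ?");
    $sth->execute(".");    # query the current directory
    while (my ($name, $size, $mode) = $sth->fetchrow_array) {
        printf "%-20s %8d %o\n", $name, $size, $mode;
    }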
File diff suppressed because it is too large
@ -0,0 +1,665 @@
|
|||
=head1 NAME
|
||||
|
||||
DBD::File::Developers - Developers documentation for DBD::File
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
package DBD::myDriver;
|
||||
|
||||
use base qw( DBD::File );
|
||||
|
||||
sub driver
|
||||
{
|
||||
...
|
||||
my $drh = $proto->SUPER::driver ($attr);
|
||||
...
|
||||
return $drh->{class};
|
||||
}
|
||||
|
||||
sub CLONE { ... }
|
||||
|
||||
package DBD::myDriver::dr;
|
||||
|
||||
@ISA = qw( DBD::File::dr );
|
||||
|
||||
sub data_sources { ... }
|
||||
...
|
||||
|
||||
package DBD::myDriver::db;
|
||||
|
||||
@ISA = qw( DBD::File::db );
|
||||
|
||||
sub init_valid_attributes { ... }
|
||||
sub init_default_attributes { ... }
|
||||
sub set_versions { ... }
|
||||
sub validate_STORE_attr { my ($dbh, $attrib, $value) = @_; ... }
|
||||
sub validate_FETCH_attr { my ($dbh, $attrib) = @_; ... }
|
||||
sub get_myd_versions { ... }
|
||||
|
||||
package DBD::myDriver::st;
|
||||
|
||||
@ISA = qw( DBD::File::st );
|
||||
|
||||
sub FETCH { ... }
|
||||
sub STORE { ... }
|
||||
|
||||
package DBD::myDriver::Statement;
|
||||
|
||||
@ISA = qw( DBD::File::Statement );
|
||||
|
||||
package DBD::myDriver::Table;
|
||||
|
||||
@ISA = qw( DBD::File::Table );
|
||||
|
||||
my %reset_on_modify = (
|
||||
myd_abc => "myd_foo",
|
||||
myd_mno => "myd_bar",
|
||||
);
|
||||
__PACKAGE__->register_reset_on_modify (\%reset_on_modify);
|
||||
my %compat_map = (
|
||||
abc => 'foo_abc',
|
||||
xyz => 'foo_xyz',
|
||||
);
|
||||
__PACKAGE__->register_compat_map (\%compat_map);
|
||||
|
||||
sub bootstrap_table_meta { ... }
|
||||
sub init_table_meta { ... }
|
||||
sub table_meta_attr_changed { ... }
|
||||
sub open_data { ... }
|
||||
|
||||
sub fetch_row { ... }
|
||||
sub push_row { ... }
|
||||
sub push_names { ... }
|
||||
|
||||
# optimize the SQL engine by adding one or more of
|
||||
sub update_current_row { ... }
|
||||
# or
|
||||
sub update_specific_row { ... }
|
||||
# or
|
||||
sub update_one_row { ... }
|
||||
# or
|
||||
sub insert_new_row { ... }
|
||||
# or
|
||||
sub delete_current_row { ... }
|
||||
# or
|
||||
sub delete_one_row { ... }
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
This document describes how DBD developers can write DBD::File based DBI
|
||||
drivers. It supplements L<DBI::DBD> and L<DBI::DBD::SqlEngine::Developers>,
|
||||
which you should read first.
|
||||
|
||||
=head1 CLASSES
|
||||
|
||||
Each DBI driver must provide a package global C<driver> method and three
|
||||
DBI related classes:
|
||||
|
||||
=over 4
|
||||
|
||||
=item DBD::File::dr
|
||||
|
||||
Driver package, contains the methods DBI calls indirectly via DBI
|
||||
interface:
|
||||
|
||||
DBI->connect ('DBI:DBM:', undef, undef, {})
|
||||
|
||||
# invokes
|
||||
package DBD::DBM::dr;
|
||||
@DBD::DBM::dr::ISA = qw( DBD::File::dr );
|
||||
|
||||
sub connect ($$;$$$)
|
||||
{
|
||||
...
|
||||
}
|
||||
|
||||
Similarly for C<< data_sources >> and C<< disconnect_all >>.
|
||||
|
||||
Pure Perl DBI drivers derived from DBD::File do not usually need to
|
||||
override any of the methods provided through the DBD::XXX::dr package;
|
||||
however, if you need additional initialization in the connect method
|
||||
you may need to.
|
||||
|
||||
=item DBD::File::db
|
||||
|
||||
Contains the methods which are called through DBI database handles
|
||||
(C<< $dbh >>). e.g.,
|
||||
|
||||
$sth = $dbh->prepare ("select * from foo");
|
||||
# returns the f_encoding setting for table foo
|
||||
$dbh->csv_get_meta ("foo", "f_encoding");
|
||||
|
||||
DBD::File provides the typical methods required here. Developers who
|
||||
write DBI drivers based on DBD::File need to override the methods C<<
|
||||
set_versions >> and C<< init_valid_attributes >>.
|
||||
|
||||
=item DBD::File::st
|
||||
|
||||
Contains the methods to deal with prepared statement handles. e.g.,
|
||||
|
||||
$sth->execute () or die $sth->errstr;
|
||||
|
||||
=back
|
||||
|
||||
=head2 DBD::File
|
||||
|
||||
This is the main package containing the routines to initialize
|
||||
DBD::File based DBI drivers. Primarily the C<< DBD::File::driver >>
|
||||
method is invoked, either directly from DBI when the driver is
|
||||
initialized or from the derived class.
|
||||
|
||||
package DBD::DBM;
|
||||
|
||||
use base qw( DBD::File );
|
||||
|
||||
sub driver
|
||||
{
|
||||
my ($class, $attr) = @_;
|
||||
...
|
||||
my $drh = $class->SUPER::driver ($attr);
|
||||
...
|
||||
return $drh;
|
||||
}
|
||||
|
||||
It is not necessary to implement your own driver method as long as
|
||||
additional initialization (e.g. installing more private driver
|
||||
methods) is not required. You do not need to call C<< setup_driver >>
|
||||
as DBD::File takes care of it.
|
||||
|
||||
=head2 DBD::File::dr
|
||||
|
||||
The driver package contains the methods DBI calls indirectly via the DBI
|
||||
interface (see L<DBI/DBI Class Methods>).
|
||||
|
||||
DBD::File based DBI drivers usually do not need to implement anything here,
|
||||
it is enough to do the basic initialization:
|
||||
|
||||
package DBD::XXX::dr;
|
||||
|
||||
@DBD::XXX::dr::ISA = qw (DBD::File::dr);
|
||||
$DBD::XXX::dr::imp_data_size = 0;
|
||||
$DBD::XXX::dr::data_sources_attr = undef;
|
||||
$DBD::XXX::ATTRIBUTION = "DBD::XXX $DBD::XXX::VERSION by Hans Mustermann";
|
||||
|
||||
=head2 DBD::File::db
|
||||
|
||||
This package defines the database methods, which are called via the DBI
|
||||
database handle C<< $dbh >>.
|
||||
|
||||
Methods provided by DBD::File:
|
||||
|
||||
=over 4
|
||||
|
||||
=item ping
|
||||
|
||||
Simply returns the content of the C<< Active >> attribute. Override
|
||||
when your driver needs more complicated actions here.
|
||||
|
||||
=item prepare
|
||||
|
||||
Prepares a new SQL statement to execute. Returns a statement handle,
|
||||
C<< $sth >> - an instance of DBD::XXX::st. It is neither required nor
|
||||
recommended to override this method.
|
||||
|
||||
=item FETCH
|
||||
|
||||
Fetches an attribute of a DBI database object. Private handle attributes
|
||||
must have a prefix (this is mandatory). If a requested attribute is
|
||||
detected as a private attribute without a valid prefix, the driver prefix
|
||||
(written as C<$drv_prefix>) is added.
|
||||
|
||||
The driver prefix is extracted from the attribute name and verified against
|
||||
C<< $dbh->{$drv_prefix . "valid_attrs"} >> (when it exists). If the
|
||||
requested attribute value is not listed as a valid attribute, this method
|
||||
croaks. If the attribute is valid and readonly (listed in C<< $dbh->{
|
||||
$drv_prefix . "readonly_attrs" } >> when it exists), a real copy of the
|
||||
attribute value is returned. So it's not possible to modify
|
||||
C<f_valid_attrs> from outside of DBD::File::db or a derived class.
|
||||
|
||||
=item STORE
|
||||
|
||||
Stores a database private attribute. Private handle attributes must have a
|
||||
prefix (this is mandatory). If a requested attribute is detected as a private
|
||||
attribute without a valid prefix, the driver prefix (written as
|
||||
C<$drv_prefix>) is added. If the database handle has an attribute
|
||||
C<${drv_prefix}_valid_attrs> - for attribute names which are not listed in
|
||||
that hash, this method croaks. If the database handle has an attribute
|
||||
C<${drv_prefix}_readonly_attrs>, only attributes which are not listed there
|
||||
can be stored (once they are initialized). Trying to overwrite such an
|
||||
immutable attribute forces this method to croak.
|
||||
|
||||
An example of a valid attributes list can be found in
|
||||
C<< DBD::File::db::init_valid_attributes >>.
|
||||
|
||||
=item set_versions
|
||||
|
||||
This method sets the attribute C<f_version> with the version of DBD::File.
|
||||
|
||||
This method is called at the beginning of the C<connect ()> phase.
|
||||
|
||||
When overriding this method, do not forget to invoke the superior one.
|
||||
|
||||
=item init_valid_attributes
|
||||
|
||||
This method is called after the database handle is instantiated as the
|
||||
first attribute initialization.
|
||||
|
||||
C<< DBD::File::db::init_valid_attributes >> initializes the attributes
|
||||
C<f_valid_attrs> and C<f_readonly_attrs> (a subclass sketch follows this list).
|
||||
|
||||
When overriding this method, do not forget to invoke the superior one,
|
||||
preferably before doing anything else. Compatibility table attribute
|
||||
access must be initialized here to allow DBD::File to instantiate the
|
||||
map tie:
|
||||
|
||||
# for DBD::CSV
|
||||
$dbh->{csv_meta} = "csv_tables";
|
||||
# for DBD::DBM
|
||||
$dbh->{dbm_meta} = "dbm_tables";
|
||||
# for DBD::AnyData
|
||||
$dbh->{ad_meta} = "ad_tables";
|
||||
|
||||
=item init_default_attributes
|
||||
|
||||
This method is called after the database handle is instantiated to
|
||||
initialize the default attributes.
|
||||
|
||||
C<< DBD::File::db::init_default_attributes >> initializes the attributes
|
||||
C<f_dir>, C<f_meta>, C<f_meta_map>, C<f_version>.
|
||||
|
||||
When the derived implementor class provides the attribute to validate
|
||||
attributes (e.g. C<< $dbh->{dbm_valid_attrs} = {...}; >>) or the attribute
|
||||
containing the immutable attributes (e.g.
|
||||
C<< $dbh->{dbm_readonly_attrs} = {...}; >>), the attributes
|
||||
C<drv_valid_attrs>, C<drv_readonly_attrs>, C<drv_version> and C<drv_meta>
|
||||
are added (when available) to the list of valid and immutable attributes
|
||||
(where C<drv_> is interpreted as the driver prefix).
|
||||
|
||||
If C<drv_meta> is set, an attribute with the name in C<drv_meta> is
|
||||
initialized providing restricted read/write access to the meta data of the
|
||||
tables using C<DBD::File::TieTables> in the first (table) level and
|
||||
C<DBD::File::TieMeta> for the meta attribute level. C<DBD::File::TieTables>
|
||||
uses C<DBD::DRV::Table::get_table_meta> to initialize the second level
|
||||
tied hash on FETCH/STORE. The C<DBD::File::TieMeta> class uses
|
||||
C<DBD::DRV::Table::get_table_meta_attr> to FETCH attribute values and
|
||||
C<DBD::DRV::Table::set_table_meta_attr> to STORE attribute values. This
|
||||
allows it to map meta attributes for compatibility reasons.
|
||||
|
||||
=item get_single_table_meta
|
||||
|
||||
=item get_file_meta
|
||||
|
||||
Retrieve an attribute from a table's meta information. The method
|
||||
signature is C<< get_file_meta ($dbh, $table, $attr) >>. This method
|
||||
is called by the injected db handle method C<< ${drv_prefix}get_meta >>.
|
||||
|
||||
While get_file_meta allows C<$table> or C<$attr> to be a list of tables or
|
||||
attributes to retrieve, get_single_table_meta allows only one table name
|
||||
and only one attribute name. A table name of C<'.'> (single dot) is
|
||||
interpreted as the default table and this will retrieve the appropriate
|
||||
attribute globally from the dbh. This has the same restrictions as
|
||||
C<< $dbh->{$attrib} >>.
|
||||
|
||||
get_file_meta allows C<'+'> and C<'*'> as wildcards for table names;
C<$table> may also be a regular expression matched against the table names
(the default table is excluded from the match). The table name C<'*'> is
|
||||
I<all currently known tables, including the default one>. The table
|
||||
name C<'+'> is I<all table names which conform to
|
||||
ANSI file name restrictions> (/^[_A-Za-z0-9]+$/).
|
||||
|
||||
The table meta information is retrieved using the get_table_meta and
|
||||
get_table_meta_attr methods of the table class of the implementation.
|
||||
|
||||
=item set_single_table_meta
|
||||
|
||||
=item set_file_meta
|
||||
|
||||
Sets an attribute in a table's meta information. The method signature is
|
||||
C<< set_file_meta ($dbh, $table, $attr, $value) >>. This method is called
|
||||
by the injected db handle method C<< ${drv_prefix}set_meta >>.
|
||||
|
||||
While set_file_meta allows C<$table> to be a list of tables and C<$attr>
|
||||
to be a hash of several attributes to set, set_single_table_meta allows
|
||||
only one table name and only one attribute name/value pair.
|
||||
|
||||
The wildcard characters for the table name are the same as for
|
||||
get_file_meta.
|
||||
|
||||
The table meta information is updated using the get_table_meta and
|
||||
set_table_meta_attr methods of the table class of the implementation.
|
||||
|
||||
=item clear_file_meta
|
||||
|
||||
Clears all meta information cached about a table. The method signature is
|
||||
C<< clear_file_meta ($dbh, $table) >>. This method is called
|
||||
by the injected db handle method C<< ${drv_prefix}clear_meta >>.
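
A usage sketch of the injected handle methods described in the last three
items, assuming a hypothetical driver prefix of C<foo_> and that the injected
methods are installed as regular C<$dbh> methods (the table name and values
are made up):

  # read a single attribute of one table
  my $dir = $dbh->foo_get_meta ("users", "f_dir");

  # set attributes for one table (a hash of attribute/value pairs is allowed)
  $dbh->foo_set_meta ("users", { f_ext => ".csv/r" });

  # drop all cached meta information for one table
  $dbh->foo_clear_meta ("users");
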
|
||||
|
||||
=back
|
||||
|
||||
=head2 DBD::File::st
|
||||
|
||||
Contains the methods to deal with prepared statement handles:
|
||||
|
||||
=over 4
|
||||
|
||||
=item FETCH
|
||||
|
||||
Fetches statement handle attributes. Supported attributes (for full overview
|
||||
see L<DBI/Statement Handle Attributes>) are C<NAME>, C<TYPE>, C<PRECISION>
|
||||
and C<NULLABLE>, provided that SQL::Statement is used as the SQL execution
engine and a statement was prepared successfully. When SQL::Statement has
additional information about a table, that information is returned. Otherwise, the
|
||||
same defaults as in L<DBI::DBD::SqlEngine> are used.
|
||||
|
||||
This method usually needs to be extended in a derived implementation.
See L<DBD::CSV> or L<DBD::DBM> for examples.
|
||||
|
||||
=back
|
||||
|
||||
=head2 DBD::File::TableSource::FileSystem
|
||||
|
||||
Provides data sources and table information on database driver and database
|
||||
handle level.
|
||||
|
||||
package DBD::File::TableSource::FileSystem;
|
||||
|
||||
sub data_sources ($;$)
|
||||
{
|
||||
my ($class, $drh, $attrs) = @_;
|
||||
...
|
||||
}
|
||||
|
||||
sub avail_tables
|
||||
{
|
||||
my ($class, $drh) = @_;
|
||||
...
|
||||
}
|
||||
|
||||
The C<data_sources> method is called when the user invokes any of the
|
||||
following:
|
||||
|
||||
@ary = DBI->data_sources ($driver);
|
||||
@ary = DBI->data_sources ($driver, \%attr);
|
||||
|
||||
@ary = $dbh->data_sources ();
|
||||
@ary = $dbh->data_sources (\%attr);
|
||||
|
||||
The C<avail_tables> method is called when the user invokes any of the
|
||||
following:
|
||||
|
||||
@names = $dbh->tables ($catalog, $schema, $table, $type);
|
||||
|
||||
$sth = $dbh->table_info ($catalog, $schema, $table, $type);
|
||||
$sth = $dbh->table_info ($catalog, $schema, $table, $type, \%attr);
|
||||
|
||||
$dbh->func ("list_tables");
|
||||
|
||||
Every time where an C<\%attr> argument can be specified, this C<\%attr>
|
||||
object's C<sql_table_source> attribute is preferred over the C<$dbh>
|
||||
attribute or the driver default.
|
||||
|
||||
=head2 DBD::File::DataSource::Stream
|
||||
|
||||
package DBD::File::DataSource::Stream;
|
||||
|
||||
@DBD::File::DataSource::Stream::ISA = 'DBI::DBD::SqlEngine::DataSource';
|
||||
|
||||
sub complete_table_name
|
||||
{
|
||||
my ($self, $meta, $file, $respect_case) = @_;
|
||||
...
|
||||
}
|
||||
|
||||
Clears all meta attributes identifying a file: C<f_fqfn>, C<f_fqbn> and
|
||||
C<f_fqln>. The table name is set according to C<$respect_case> and
|
||||
C<< $meta->{sql_identifier_case} >> (SQL_IC_LOWER, SQL_IC_UPPER).
|
||||
|
||||
package DBD::File::DataSource::Stream;
|
||||
|
||||
sub apply_encoding
|
||||
{
|
||||
my ($self, $meta, $fn) = @_;
|
||||
...
|
||||
}
|
||||
|
||||
Applies the encoding from I<meta information> (C<< $meta->{f_encoding} >>)
|
||||
to the file handle opened in C<open_data>.
|
||||
|
||||
package DBD::File::DataSource::Stream;
|
||||
|
||||
sub open_data
|
||||
{
|
||||
my ($self, $meta, $attrs, $flags) = @_;
|
||||
...
|
||||
}
|
||||
|
||||
Opens (C<dup (2)>) the file handle provided in C<< $meta->{f_file} >>.
|
||||
|
||||
package DBD::File::DataSource::Stream;
|
||||
|
||||
sub can_flock { ... }
|
||||
|
||||
Returns whether C<flock (2)> is available or not (avoids retesting in
|
||||
subclasses).
|
||||
|
||||
=head2 DBD::File::DataSource::File
|
||||
|
||||
package DBD::File::DataSource::File;
|
||||
|
||||
sub complete_table_name ($$;$)
|
||||
{
|
||||
my ($self, $meta, $table, $respect_case) = @_;
|
||||
...
|
||||
}
|
||||
|
||||
The method C<complete_table_name> tries to map a filename to the associated
|
||||
table name. It is called with a partially filled meta structure for the
|
||||
resulting table containing at least the following attributes:
|
||||
C<< f_ext >>, C<< f_dir >>, C<< f_lockfile >> and C<< sql_identifier_case >>.
|
||||
|
||||
If a file/table map can be found then this method sets the C<< f_fqfn
|
||||
>>, C<< f_fqbn >>, C<< f_fqln >> and C<< table_name >> attributes in
|
||||
the meta structure. If a map cannot be found the table name will be
|
||||
undef.
|
||||
|
||||
package DBD::File::DataSource::File;
|
||||
|
||||
sub open_data ($)
|
||||
{
|
||||
my ($self, $meta, $attrs, $flags) = @_;
|
||||
...
|
||||
}
|
||||
|
||||
Depending on the attributes set in the table's meta data, the
|
||||
following steps are performed. Unless C<< f_dontopen >> is set to a
|
||||
true value, C<< f_fqfn >> must contain the fully qualified file name
|
||||
for the table to work on (file2table ensures this). The encoding in
|
||||
C<< f_encoding >> is applied if set and the file is opened. If
|
||||
C<< f_fqln >> (fully qualified lock name) is set, this file is opened,
|
||||
too. Depending on the value in C<< f_lock >>, the appropriate lock is
|
||||
set on the opened data file or lock file.
|
||||
|
||||
=head2 DBD::File::Statement
|
||||
|
||||
Derives from DBI::SQL::Nano::Statement to provide the following method:
|
||||
|
||||
=over 4
|
||||
|
||||
=item open_table
|
||||
|
||||
Implements the open_table method required by L<SQL::Statement> and
|
||||
L<DBI::SQL::Nano>. All the work for opening the file(s) belonging to the
|
||||
table is handled and parametrized in DBD::File::Table. Unless you intend
|
||||
to add anything to the following implementation, an empty DBD::XXX::Statement
|
||||
package satisfies DBD::File.
|
||||
|
||||
sub open_table ($$$$$)
|
||||
{
|
||||
my ($self, $data, $table, $createMode, $lockMode) = @_;
|
||||
|
||||
my $class = ref $self;
|
||||
$class =~ s/::Statement/::Table/;
|
||||
|
||||
my $flags = {
|
||||
createMode => $createMode,
|
||||
lockMode => $lockMode,
|
||||
};
|
||||
$self->{command} eq "DROP" and $flags->{dropMode} = 1;
|
||||
|
||||
return $class->new ($data, { table => $table }, $flags);
|
||||
} # open_table
|
||||
|
||||
=back
|
||||
|
||||
=head2 DBD::File::Table
|
||||
|
||||
Derives from DBI::SQL::Nano::Table and provides physical file access for
|
||||
the table data which are stored in the files.
|
||||
|
||||
=over 4
|
||||
|
||||
=item bootstrap_table_meta
|
||||
|
||||
Initializes a table meta structure. Can be safely overridden in a
|
||||
derived class, as long as the C<< SUPER >> method is called at the end
|
||||
of the overridden method.
|
||||
|
||||
It copies the following attributes from the database into the table meta data:
|
||||
C<< f_dir >>, C<< f_ext >>, C<< f_encoding >>, C<< f_lock >>, C<< f_schema >>
|
||||
and C<< f_lockfile >> and makes them sticky to the table.
|
||||
|
||||
This method should be called before you attempt to map between file
|
||||
name and table name to ensure the correct directory, extension etc. are
|
||||
used.
|
||||
|
||||
=item init_table_meta
|
||||
|
||||
Initializes more attributes of the table meta data - usually more
|
||||
expensive ones (e.g. those which require class instantiations) - when
|
||||
the file name and the table name could be mapped.
|
||||
|
||||
=item get_table_meta
|
||||
|
||||
Returns the table meta data. If there are none for the required
|
||||
table, a new one is initialized. When it fails, nothing is
|
||||
returned. On success, the name of the table and the meta data
|
||||
structure is returned.
|
||||
|
||||
=item get_table_meta_attr
|
||||
|
||||
Returns a single attribute from the table meta data. If the attribute
|
||||
name appears in C<%compat_map>, the attribute name is updated from
|
||||
there.
|
||||
|
||||
=item set_table_meta_attr
|
||||
|
||||
Sets a single attribute in the table meta data. If the attribute
|
||||
name appears in C<%compat_map>, the attribute name is updated from
|
||||
there.
|
||||
|
||||
=item table_meta_attr_changed
|
||||
|
||||
Called when an attribute of the meta data is modified.
|
||||
|
||||
If the modified attribute requires resetting a calculated attribute, the
|
||||
calculated attribute is reset (deleted from meta data structure) and
|
||||
the I<initialized> flag is removed, too. The decision is made based on
|
||||
C<%register_reset_on_modify>.
|
||||
|
||||
=item register_reset_on_modify
|
||||
|
||||
Allows C<set_table_meta_attr> to reset meta attributes when special
|
||||
attributes are modified. For DBD::File, modifying one of C<f_file>, C<f_dir>,
|
||||
C<f_ext> or C<f_lockfile> will reset C<f_fqfn>. DBD::DBM extends the
|
||||
list for C<dbm_type> and C<dbm_mldbm> to reset the value of C<dbm_tietype>.
|
||||
|
||||
If your DBD has calculated values in the meta data area, then call
|
||||
C<register_reset_on_modify>:
|
||||
|
||||
my %reset_on_modify = (xxx_foo => "xxx_bar");
|
||||
__PACKAGE__->register_reset_on_modify (\%reset_on_modify);
|
||||
|
||||
=item register_compat_map
|
||||
|
||||
Allows C<get_table_meta_attr> and C<set_table_meta_attr> to update the
|
||||
attribute name to the current favored one:
|
||||
|
||||
# from DBD::DBM
|
||||
my %compat_map = (dbm_ext => "f_ext");
|
||||
__PACKAGE__->register_compat_map (\%compat_map);
|
||||
|
||||
=item open_file
|
||||
|
||||
Called to open the table's data file.
|
||||
|
||||
Depending on the attributes set in the table's meta data, the
|
||||
following steps are performed. Unless C<< f_dontopen >> is set to a
|
||||
true value, C<< f_fqfn >> must contain the fully qualified file name
|
||||
for the table to work on (file2table ensures this). The encoding in
|
||||
C<< f_encoding >> is applied if set and the file is opened. If
|
||||
C<< f_fqln >> (fully qualified lock name) is set, this file is opened,
|
||||
too. Depending on the value in C<< f_lock >>, the appropriate lock is
|
||||
set on the opened data file or lock file.
|
||||
|
||||
After this is done, a derived class might add more steps in an overridden
|
||||
C<< open_file >> method.
|
||||
|
||||
=item new
|
||||
|
||||
Instantiates the table. This is done in 3 steps:
|
||||
|
||||
1. get the table meta data
|
||||
2. open the data file
|
||||
3. bless the table data structure using inherited constructor new
|
||||
|
||||
It is not recommended to override the constructor of the table class.
|
||||
Find a reasonable place to add your extensions in one of the above four
|
||||
methods.
|
||||
|
||||
=item drop
|
||||
|
||||
Implements the abstract table method for the C<< DROP >>
|
||||
command. Discards table meta data after all files belonging to the
|
||||
table are closed and unlinked.
|
||||
|
||||
Overriding this method might be reasonable in very rare cases.
|
||||
|
||||
=item seek
|
||||
|
||||
Implements the abstract table method used when accessing the table from the
|
||||
engine. C<< seek >> is called every time the engine uses dumb algorithms
|
||||
for iterating over the table content.
|
||||
|
||||
=item truncate
|
||||
|
||||
Implements the abstract table method used when dumb table algorithms
|
||||
for C<< UPDATE >> or C<< DELETE >> need to truncate the table storage
|
||||
after the last written row.
|
||||
|
||||
=back
|
||||
|
||||
You should consult the documentation of C<< SQL::Eval::Table >> (see
|
||||
L<SQL::Eval>) to get more information about the abstract methods of the
|
||||
table's base class you have to override and a description of the table
|
||||
meta information expected by the SQL engines.
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
The module DBD::File is currently maintained by
|
||||
|
||||
H.Merijn Brand < h.m.brand at xs4all.nl > and
|
||||
Jens Rehsack < rehsack at googlemail.com >
|
||||
|
||||
The original author is Jochen Wiedmann.
|
||||
|
||||
=head1 COPYRIGHT AND LICENSE
|
||||
|
||||
Copyright (C) 2010-2013 by H.Merijn Brand & Jens Rehsack
|
||||
|
||||
All rights reserved.
|
||||
|
||||
You may freely distribute and/or modify this module under the terms of
|
||||
either the GNU General Public License (GPL) or the Artistic License, as
|
||||
specified in the Perl README file.
|
||||
|
||||
=cut
|
|
@ -0,0 +1,191 @@
|
|||
=head1 NAME
|
||||
|
||||
DBD::File::HowTo - Guide to create DBD::File based driver
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
perldoc DBD::File::HowTo
|
||||
perldoc DBI
|
||||
perldoc DBI::DBD
|
||||
perldoc DBD::File::Developers
|
||||
perldoc DBI::DBD::SqlEngine::Developers
|
||||
perldoc DBI::DBD::SqlEngine
|
||||
perldoc SQL::Eval
|
||||
perldoc DBI::DBD::SqlEngine::HowTo
|
||||
perldoc SQL::Statement::Embed
|
||||
perldoc DBD::File
|
||||
perldoc DBD::File::HowTo
|
||||
perldoc DBD::File::Developers
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
This document provides a step-by-step guide on how to create a new
C<DBD::File> based DBD. It expects that you have carefully read the L<DBI>
documentation, that you're familiar with L<DBI::DBD> and that you have read and
|
||||
understood L<DBD::ExampleP>.
|
||||
|
||||
This document addresses experienced developers who are really sure that
|
||||
they need to invest time when writing a new DBI Driver. Writing a DBI
|
||||
Driver is neither a weekend project nor an easy job for hobby coders
|
||||
after work. Expect one or two man-months of work for a first working version.
|
||||
|
||||
Those who are still reading should be able to sing the rules of
|
||||
L<DBI::DBD/CREATING A NEW DRIVER>.
|
||||
|
||||
Of course, DBD::File is a DBI::DBD::SqlEngine subclass, so you surely have read
|
||||
L<DBI::DBD::SqlEngine::HowTo> before continuing here.
|
||||
|
||||
=head1 CREATING DRIVER CLASSES
|
||||
|
||||
Do you have an entry in DBI's DBD registry? For this guide, a prefix of
|
||||
C<foo_> is assumed.
|
||||
|
||||
=head2 Sample Skeleton
|
||||
|
||||
package DBD::Foo;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
use vars qw(@ISA $VERSION);
|
||||
use base qw(DBD::File);
|
||||
|
||||
use DBI ();
|
||||
|
||||
$VERSION = "0.001";
|
||||
|
||||
package DBD::Foo::dr;
|
||||
|
||||
use vars qw(@ISA $imp_data_size);
|
||||
|
||||
@ISA = qw(DBD::File::dr);
|
||||
$imp_data_size = 0;
|
||||
|
||||
package DBD::Foo::db;
|
||||
|
||||
use vars qw(@ISA $imp_data_size);
|
||||
|
||||
@ISA = qw(DBD::File::db);
|
||||
$imp_data_size = 0;
|
||||
|
||||
package DBD::Foo::st;
|
||||
|
||||
use vars qw(@ISA $imp_data_size);
|
||||
|
||||
@ISA = qw(DBD::File::st);
|
||||
$imp_data_size = 0;
|
||||
|
||||
package DBD::Foo::Statement;
|
||||
|
||||
use vars qw(@ISA);
|
||||
|
||||
@ISA = qw(DBD::File::Statement);
|
||||
|
||||
package DBD::Foo::Table;
|
||||
|
||||
use vars qw(@ISA);
|
||||
|
||||
@ISA = qw(DBD::File::Table);
|
||||
|
||||
1;
|
||||
|
||||
Tiny, eh? And all you have now is a DBD named foo which will be able to
|
||||
deal with temporary tables, as long as you use L<SQL::Statement>. In
|
||||
L<DBI::SQL::Nano> environments, this DBD can do nothing.
|
||||
|
||||
=head2 Start over
|
||||
|
||||
Based on L<DBI::DBD::SqlEngine::HowTo>, we now have a driver which
can do basic things. Of course, it should now derive from DBD::File
|
||||
instead of DBI::DBD::SqlEngine, shouldn't it?
|
||||
|
||||
DBD::File extends DBI::DBD::SqlEngine to deal with any kind of files.
|
||||
In principle, the only extensions required are to the table class:
|
||||
|
||||
package DBD::Foo::Table;
|
||||
|
||||
sub bootstrap_table_meta
|
||||
{
|
||||
my ( $self, $dbh, $meta, $table ) = @_;
|
||||
|
||||
# initialize all $meta attributes which might be relevant for
|
||||
# file2table
|
||||
|
||||
return $self->SUPER::bootstrap_table_meta($dbh, $meta, $table);
|
||||
}
|
||||
|
||||
sub init_table_meta
|
||||
{
|
||||
my ( $self, $dbh, $meta, $table ) = @_;
|
||||
|
||||
# called after $meta contains the results from file2table
|
||||
# initialize all missing $meta attributes
|
||||
|
||||
$self->SUPER::init_table_meta( $dbh, $meta, $table );
|
||||
}
|
||||
|
||||
In case C<DBD::File::Table::open_file> doesn't open the files the way the
driver needs, override it!
|
||||
|
||||
sub open_file
|
||||
{
|
||||
my ( $self, $meta, $attrs, $flags ) = @_;
|
||||
# ensure that $meta->{f_dontopen} is set
|
||||
$self->SUPER::open_file( $meta, $attrs, $flags );
|
||||
# now do what ever needs to be done
|
||||
}
|
||||
|
||||
Combined with the methods implemented using the L<SQL::Statement::Embed>
|
||||
guide, the table is fully working and you can give it a first try.
|
||||
|
||||
=head2 User comfort
|
||||
|
||||
C<DBD::File> since C<0.39> consolidates all persistent meta data of a table
|
||||
into a single structure stored in C<< $dbh->{f_meta} >>. With C<DBD::File>
|
||||
version C<0.41> and C<DBI::DBD::SqlEngine> version C<0.05>, this
|
||||
consolidation moves to L<DBI::DBD::SqlEngine>. It's still the
|
||||
C<< $dbh->{$drv_prefix . "_meta"} >> attribute which controls this, so what you
learned here before is still valid.
|
||||
|
||||
sub init_valid_attributes
|
||||
{
|
||||
my $dbh = $_[0];
|
||||
|
||||
$dbh->SUPER::init_valid_attributes ();
|
||||
|
||||
$dbh->{foo_valid_attrs} = { ... };
|
||||
$dbh->{foo_readonly_attrs} = { ... };
|
||||
|
||||
$dbh->{foo_meta} = "foo_tables";
|
||||
|
||||
return $dbh;
|
||||
}
|
||||
|
||||
See updates at L<DBI::DBD::SqlEngine::HowTo/User comfort>.
|
||||
|
||||
=head2 Testing
|
||||
|
||||
Now you should have your own DBD::File based driver. That was easy, wasn't it?
But does it work well? Prove it by writing tests and remember to use
dbd_edit_mm_attribs from L<DBI::DBD> to ensure that even rare cases are tested.
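
A minimal Makefile.PL sketch using dbd_edit_mm_attribs; the C<DBD::Foo> names
come from this guide's hypothetical driver, and the exact option set should be
checked against your DBI version:

  use ExtUtils::MakeMaker;
  use DBI::DBD;

  WriteMakefile (
      dbd_edit_mm_attribs ({
          NAME         => "DBD::Foo",
          VERSION_FROM => "lib/DBD/Foo.pm",
          PREREQ_PM    => { DBI => 0 },
      }, {
          create_pp_tests => 1,   # also run the test suite under DBI::PurePerl
      }));
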
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
This guide is written by Jens Rehsack. DBD::File is written by Jochen
|
||||
Wiedmann and Jeff Zucker.
|
||||
|
||||
The module DBD::File is currently maintained by
|
||||
|
||||
H.Merijn Brand < h.m.brand at xs4all.nl > and
|
||||
Jens Rehsack < rehsack at googlemail.com >
|
||||
|
||||
=head1 COPYRIGHT AND LICENSE
|
||||
|
||||
Copyright (C) 2010 by H.Merijn Brand & Jens Rehsack
|
||||
|
||||
All rights reserved.
|
||||
|
||||
You may freely distribute and/or modify this module under the terms of
|
||||
either the GNU General Public License (GPL) or the Artistic License, as
|
||||
specified in the Perl README file.
|
||||
|
||||
=cut
|
|
@ -0,0 +1,176 @@
|
|||
=head1 NAME
|
||||
|
||||
DBD::File::Roadmap - Planned Enhancements for DBD::File and pure Perl DBD's
|
||||
|
||||
Jens Rehsack - May 2010
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
This document gives a high level overview of the future of the DBD::File DBI
|
||||
driver and groundwork for pure Perl DBI drivers.
|
||||
|
||||
The planned enhancements cover features, testing, performance, reliability,
|
||||
extensibility and more.
|
||||
|
||||
=head1 CHANGES AND ENHANCEMENTS
|
||||
|
||||
=head2 Features
|
||||
|
||||
There are some missing features we would like to add, but there is
no timeline for them:
|
||||
|
||||
=over 4
|
||||
|
||||
=item LOCK TABLE
|
||||
|
||||
The newly implemented internal common table meta storage area would allow
|
||||
us to implement LOCK TABLE support based on file system C<flock ()>
|
||||
support.
|
||||
|
||||
=item Transaction support
|
||||
|
||||
While DBD::AnyData recommends explicitly committing by importing and
|
||||
exporting tables, DBD::File might be enhanced in a future version to allow
|
||||
transparent transactions using the temporary tables of SQL::Statement as
|
||||
shadow (dirty) tables.
|
||||
|
||||
Transaction support will heavily rely on lock table support.
|
||||
|
||||
=item Data Dictionary Persistence
|
||||
|
||||
SQL::Statement provides dictionary information when a "CREATE TABLE ..."
|
||||
statement is executed. This dictionary is preserved for some statement
|
||||
handle attribute fetches (such as C<NULLABLE> or C<PRECISION>).
|
||||
|
||||
It is planned to extend DBD::File to support data dictionaries to work
|
||||
on the tables in it. It is not planned to support one table in different
|
||||
dictionaries, but you can have several dictionaries in one directory.
|
||||
|
||||
=item SQL Engine selecting on connect
|
||||
|
||||
Currently the SQL engine is chosen when the module
L<DBI::SQL::Nano> is loaded. Ideally end users should be able to select the engine
|
||||
used in C<< DBI->connect () >> with a special DBD::File attribute.
|
||||
|
||||
=back
|
||||
|
||||
Other points of view to the planned features (and more features for the
|
||||
SQL::Statement engine) are shown in L<SQL::Statement::Roadmap>.
|
||||
|
||||
=head2 Testing
|
||||
|
||||
DBD::File and the dependent DBD::DBM require a lot more automated tests
|
||||
covering API stability and compatibility with optional modules
|
||||
like SQL::Statement.
|
||||
|
||||
=head2 Performance
|
||||
|
||||
Several arguments for support of features like indexes on columns
|
||||
and cursors are made for DBD::CSV (which is a DBD::File based driver,
|
||||
too). Similar arguments could be made for DBD::DBM, DBD::AnyData,
|
||||
DBD::RAM or DBD::PO etc.
|
||||
|
||||
To improve the performance of the underlying SQL engines, a clean
|
||||
re-implementation seems to be required. Currently both engines are
|
||||
prematurely optimized and therefore it is not trivial to provide
|
||||
further optimization without the risk of breaking existing features.
|
||||
|
||||
Join the DBI developers IRC channel at L<irc://irc.perl.org/dbi> to
|
||||
participate or post to the DBI Developers Mailing List.
|
||||
|
||||
=head2 Reliability
|
||||
|
||||
DBD::File currently lacks the following points:
|
||||
|
||||
=over 4
|
||||
|
||||
=item duplicate table names
|
||||
|
||||
It is currently possible to access the same table both via a relative path
(a) and via an absolute path (b). If (a) and (b) refer to the same file,
this is not recognized (except for the flock protection handled by the
operating system) and two independent tables are handled.
|
||||
|
||||
=item invalid table names
|
||||
|
||||
The current implementation does not prevent someone from choosing a
|
||||
directory name as a physical file name for the table to open.
|
||||
|
||||
=back
|
||||
|
||||
=head2 Extensibility
|
||||
|
||||
I (Jens Rehsack) have some DBD's in mind (some of them for example purposes only):
|
||||
|
||||
=over 4
|
||||
|
||||
=item DBD::Sys
|
||||
|
||||
Derive DBD::Sys from a common code base shared with DBD::File which handles
|
||||
all the emulation DBI needs (such as get_info, SQL engine handling, ...)
|
||||
|
||||
=item DBD::Dir
|
||||
|
||||
Provide a DBD::File derived driver that works with fixed table definitions through the
|
||||
file system to demonstrate how DBI / Pure Perl DBDs could handle databases
|
||||
with hierarchical structures.
|
||||
|
||||
=item DBD::Join
|
||||
|
||||
Provide a DBI driver which is able to manage multiple connections to other
databases (like DBD::Multiplex), but allows them to point to different data
sources and allows joins between their tables:
|
||||
|
||||
# Example
|
||||
  # Let table 'lsof' be a table in DBD::Sys giving a list of open files using the lsof utility
  # Let table 'dir' be a table from DBD::Dir
  $sth = $dbh->prepare( "select * from dir,lsof where path='/documents' and dir.entry = lsof.filename" );
|
||||
$sth->execute(); # gives all open files in '/documents'
|
||||
...
|
||||
|
||||
  # Let table 'filesys' be a DBD::Sys table of known file systems on the current host
  # Let table 'applications' be a table of your Configuration Management Database
  # where current applications (relocatable, with mountpoints for filesystems)
  # are stored
  $sth = $dbh->prepare( "select * from applications,filesys where " .
|
||||
"application.mountpoint = filesys.mountpoint and ".
|
||||
"filesys.mounted is true" );
|
||||
$sth->execute(); # gives all currently mounted applications on this host
|
||||
|
||||
=back
|
||||
|
||||
=head1 PRIORITIES
|
||||
|
||||
Our priorities are focused on current issues. Initially many new test
|
||||
cases for DBD::File and DBD::DBM should be added to the DBI test
|
||||
suite. After that some additional documentation on how to use the
|
||||
DBD::File API will be provided.
|
||||
|
||||
Any additional priorities will come later and can be modified by (paying)
|
||||
users.
|
||||
|
||||
=head1 RESOURCES AND CONTRIBUTIONS
|
||||
|
||||
See L<http://dbi.perl.org/contributing> for I<how you can help>.
|
||||
|
||||
If your company has benefited from DBI, please consider if
|
||||
it could make a donation to The Perl Foundation "DBI Development"
|
||||
fund at L<http://dbi.perl.org/donate> to secure future development.
|
||||
|
||||
Alternatively, if your company would benefit from a specific new
|
||||
DBI feature, please consider sponsoring its development through
|
||||
the options listed in the section "Commercial Support from the Author"
|
||||
on L<http://dbi.perl.org/support/>.
|
||||
|
||||
Using such targeted financing allows you to contribute to DBI
|
||||
development and rapidly get something specific and directly valuable
|
||||
to you in return.
|
||||
|
||||
My company also offers annual support contracts for the DBI, which
|
||||
provide another way to support the DBI and get something specific
|
||||
in return. Contact me for details.
|
||||
|
||||
Thank you.
|
||||
|
||||
=cut
|
|
@ -0,0 +1,162 @@
|
|||
package DBD::Gofer::Policy::Base;
|
||||
|
||||
# $Id: Base.pm 10087 2007-10-16 12:42:37Z Tim $
|
||||
#
|
||||
# Copyright (c) 2007, Tim Bunce, Ireland
|
||||
#
|
||||
# You may distribute under the terms of either the GNU General Public
|
||||
# License or the Artistic License, as specified in the Perl README file.
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
use Carp;
|
||||
|
||||
our $VERSION = "0.010088";
|
||||
our $AUTOLOAD;
|
||||
|
||||
my %policy_defaults = (
|
||||
# force connect method (unless overridden by go_connect_method=>'...' attribute)
|
||||
# if false: call same method on client as on server
|
||||
connect_method => 'connect',
|
||||
# force prepare method (unless overridden by go_prepare_method=>'...' attribute)
|
||||
# if false: call same method on client as on server
|
||||
prepare_method => 'prepare',
|
||||
skip_connect_check => 0,
|
||||
skip_default_methods => 0,
|
||||
skip_prepare_check => 0,
|
||||
skip_ping => 0,
|
||||
dbh_attribute_update => 'every',
|
||||
dbh_attribute_list => ['*'],
|
||||
locally_quote => 0,
|
||||
locally_quote_identifier => 0,
|
||||
cache_parse_trace_flags => 1,
|
||||
cache_parse_trace_flag => 1,
|
||||
cache_data_sources => 1,
|
||||
cache_type_info_all => 1,
|
||||
cache_tables => 0,
|
||||
cache_table_info => 0,
|
||||
cache_column_info => 0,
|
||||
cache_primary_key_info => 0,
|
||||
cache_foreign_key_info => 0,
|
||||
cache_statistics_info => 0,
|
||||
cache_get_info => 0,
|
||||
cache_func => 0,
|
||||
);
|
||||
|
||||
my $base_policy_file = $INC{"DBD/Gofer/Policy/Base.pm"};
|
||||
|
||||
__PACKAGE__->create_policy_subs(\%policy_defaults);
|
||||
|
||||
sub create_policy_subs {
|
||||
my ($class, $policy_defaults) = @_;
|
||||
|
||||
while ( my ($policy_name, $policy_default) = each %$policy_defaults) {
|
||||
my $policy_attr_name = "go_$policy_name";
|
||||
my $sub = sub {
|
||||
# $policy->foo($attr, ...)
|
||||
#carp "$policy_name($_[1],...)";
|
||||
# return the policy default value unless an attribute overrides it
|
||||
return (ref $_[1] && exists $_[1]->{$policy_attr_name})
|
||||
? $_[1]->{$policy_attr_name}
|
||||
: $policy_default;
|
||||
};
|
||||
no strict 'refs';
|
||||
*{$class . '::' . $policy_name} = $sub;
|
||||
}
|
||||
}
|
||||
|
||||
sub AUTOLOAD {
|
||||
carp "Unknown policy name $AUTOLOAD used";
|
||||
# only warn once
|
||||
no strict 'refs';
|
||||
*$AUTOLOAD = sub { undef };
|
||||
return undef;
|
||||
}
|
||||
|
||||
sub new {
|
||||
my ($class, $args) = @_;
|
||||
my $policy = {};
|
||||
bless $policy, $class;
|
||||
}
|
||||
|
||||
sub DESTROY { };
|
||||
|
||||
1;
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBD::Gofer::Policy::Base - Base class for DBD::Gofer policies
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
$dbh = DBI->connect("dbi:Gofer:transport=...;policy=...", ...)
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
DBD::Gofer can be configured via a 'policy' mechanism that allows you to
|
||||
fine-tune the number of round-trips to the Gofer server. The policies are
|
||||
grouped into classes (which may be subclassed) and referenced by the name of
|
||||
the class.
|
||||
|
||||
The L<DBD::Gofer::Policy::Base> class is the base class for all the policy
|
||||
classes and describes all the individual policy items.
|
||||
|
||||
The Base policy is not used directly. You should use a policy class derived from it.
|
||||
|
||||
=head1 POLICY CLASSES
|
||||
|
||||
Three policy classes are supplied with DBD::Gofer:
|
||||
|
||||
L<DBD::Gofer::Policy::pedantic> is most 'transparent' but slowest because it
|
||||
makes more round-trips to the Gofer server.
|
||||
|
||||
L<DBD::Gofer::Policy::classic> is a reasonable compromise - it's the default policy.
|
||||
|
||||
L<DBD::Gofer::Policy::rush> is fastest, but may require code changes in your applications.
|
||||
|
||||
Generally the default C<classic> policy is fine. When first testing an existing
|
||||
application with Gofer it is a good idea to start with the C<pedantic> policy
and then switch to C<classic> or a custom policy for final testing.
|
||||
|
||||
=head1 POLICY ITEMS
|
||||
|
||||
These are temporary docs: See the source code for the list of policies and their defaults.
|
||||
|
||||
In a future version the policies and their defaults will be defined in the pod and parsed out at load-time.
|
||||
|
||||
See the source code to this module for more details.
|
||||
|
||||
=head1 POLICY CUSTOMIZATION
|
||||
|
||||
XXX This area of DBD::Gofer is subject to change.
|
||||
|
||||
There are three ways to customize policies:
|
||||
|
||||
Policy classes are designed to influence the overall behaviour of DBD::Gofer
|
||||
with existing, unaltered programs, so they work in a reasonably optimal way
|
||||
without requiring code changes. You can implement new policy classes as
|
||||
subclasses of existing policies.
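
For example, a custom policy class can be as small as the sketch below; the
package name and the chosen overrides are just an example, and the class is
then selected with C<policy=myapp> in the DSN:

  package DBD::Gofer::Policy::myapp;

  use strict;
  use warnings;

  use base qw(DBD::Gofer::Policy::classic);

  # start from 'classic' and additionally assume table meta data is stable
  __PACKAGE__->create_policy_subs({
      cache_tables     => 1,
      cache_table_info => 1,
  });

  1;
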
|
||||
|
||||
In many cases individual policy items can be overridden on a case-by-case basis
|
||||
within your application code. You do this by passing a corresponding
C<< go_<policy_name> >> attribute into DBI method calls.
This lets you fine-tune the behaviour for special cases.
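
A hedged sketch: it assumes the attribute hash of the individual method call
is consulted, and that C<go_skip_prepare_check> corresponds to the
C<skip_prepare_check> policy item:

  # re-enable the prepare-time check for this one statement only
  my $sth = $dbh->prepare ($sql, { go_skip_prepare_check => 0 });
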
|
||||
|
||||
The policy items are implemented as methods. In many cases the methods are
|
||||
passed parameters relating to the DBD::Gofer code being executed. This means
|
||||
the policy can implement dynamic behaviour that varies depending on the
|
||||
particular circumstances, such as the particular statement being executed.
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Tim Bunce, L<http://www.tim.bunce.name>
|
||||
|
||||
=head1 LICENCE AND COPYRIGHT
|
||||
|
||||
Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
|
||||
|
||||
This module is free software; you can redistribute it and/or
|
||||
modify it under the same terms as Perl itself. See L<perlartistic>.
|
||||
|
||||
=cut
|
||||
|
|
@ -0,0 +1,79 @@
|
|||
package DBD::Gofer::Policy::classic;
|
||||
|
||||
# $Id: classic.pm 10087 2007-10-16 12:42:37Z Tim $
|
||||
#
|
||||
# Copyright (c) 2007, Tim Bunce, Ireland
|
||||
#
|
||||
# You may distribute under the terms of either the GNU General Public
|
||||
# License or the Artistic License, as specified in the Perl README file.
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
our $VERSION = "0.010088";
|
||||
|
||||
use base qw(DBD::Gofer::Policy::Base);
|
||||
|
||||
__PACKAGE__->create_policy_subs({
|
||||
|
||||
# always use connect_cached on server
|
||||
connect_method => 'connect_cached',
|
||||
|
||||
# use same methods on server as is called on client
|
||||
prepare_method => '',
|
||||
|
||||
# don't skip the connect check since that also sets dbh attributes
|
||||
# although this makes connect more expensive, that's partly offset
|
||||
# by skip_ping=>1 below, which makes connect_cached very fast.
|
||||
skip_connect_check => 0,
|
||||
|
||||
# most code doesn't rely on sth attributes being set after prepare
|
||||
skip_prepare_check => 1,
|
||||
|
||||
# we're happy to use local method if that's the same as the remote
|
||||
skip_default_methods => 1,
|
||||
|
||||
# ping is not important for DBD::Gofer and most transports
|
||||
skip_ping => 1,
|
||||
|
||||
# only update dbh attributes on first contact with server
|
||||
dbh_attribute_update => 'first',
|
||||
|
||||
# we'd like to set locally_* but can't because drivers differ
|
||||
|
||||
# get_info results usually don't change
|
||||
cache_get_info => 1,
|
||||
});
|
||||
|
||||
|
||||
1;
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBD::Gofer::Policy::classic - The 'classic' policy for DBD::Gofer
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
$dbh = DBI->connect("dbi:Gofer:transport=...;policy=classic", ...)
|
||||
|
||||
The C<classic> policy is the default DBD::Gofer policy, so need not be included in the DSN.
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
Temporary docs: See the source code for the list of policies and their defaults.
|
||||
|
||||
In a future version the policies and their defaults will be defined in the pod and parsed out at load-time.
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Tim Bunce, L<http://www.tim.bunce.name>
|
||||
|
||||
=head1 LICENCE AND COPYRIGHT
|
||||
|
||||
Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
|
||||
|
||||
This module is free software; you can redistribute it and/or
|
||||
modify it under the same terms as Perl itself. See L<perlartistic>.
|
||||
|
||||
=cut
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
package DBD::Gofer::Policy::pedantic;
|
||||
|
||||
# $Id: pedantic.pm 10087 2007-10-16 12:42:37Z Tim $
|
||||
#
|
||||
# Copyright (c) 2007, Tim Bunce, Ireland
|
||||
#
|
||||
# You may distribute under the terms of either the GNU General Public
|
||||
# License or the Artistic License, as specified in the Perl README file.
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
our $VERSION = "0.010088";
|
||||
|
||||
use base qw(DBD::Gofer::Policy::Base);
|
||||
|
||||
# the 'pedantic' policy is the same as the Base policy
|
||||
|
||||
1;
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBD::Gofer::Policy::pedantic - The 'pedantic' policy for DBD::Gofer
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
$dbh = DBI->connect("dbi:Gofer:transport=...;policy=pedantic", ...)
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
The C<pedantic> policy tries to be as transparent as possible. To do this it
|
||||
makes round-trips to the server for almost every DBI method call.
|
||||
|
||||
This is the best policy to use when first testing existing code with Gofer.
|
||||
Once it's working well you should consider moving to the C<classic> policy or defining your own policy class.
|
||||
|
||||
Temporary docs: See the source code for the list of policies and their defaults.
|
||||
|
||||
In a future version the policies and their defaults will be defined in the pod and parsed out at load-time.
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Tim Bunce, L<http://www.tim.bunce.name>
|
||||
|
||||
=head1 LICENCE AND COPYRIGHT
|
||||
|
||||
Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
|
||||
|
||||
This module is free software; you can redistribute it and/or
|
||||
modify it under the same terms as Perl itself. See L<perlartistic>.
|
||||
|
||||
=cut
|
||||
|
|
@ -0,0 +1,90 @@
|
|||
package DBD::Gofer::Policy::rush;
|
||||
|
||||
# $Id: rush.pm 10087 2007-10-16 12:42:37Z Tim $
|
||||
#
|
||||
# Copyright (c) 2007, Tim Bunce, Ireland
|
||||
#
|
||||
# You may distribute under the terms of either the GNU General Public
|
||||
# License or the Artistic License, as specified in the Perl README file.
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
our $VERSION = "0.010088";
|
||||
|
||||
use base qw(DBD::Gofer::Policy::Base);
|
||||
|
||||
__PACKAGE__->create_policy_subs({
|
||||
|
||||
# always use connect_cached on server
|
||||
connect_method => 'connect_cached',
|
||||
|
||||
# use same methods on server as is called on client
|
||||
# (because code not using placeholders would bloat the sth cache)
|
||||
prepare_method => '',
|
||||
|
||||
# Skipping the connect check is fast, but it also skips
|
||||
# fetching the remote dbh attributes!
|
||||
# Make sure that your application doesn't need access to dbh attributes.
|
||||
skip_connect_check => 1,
|
||||
|
||||
# most code doesn't rely on sth attributes being set after prepare
|
||||
skip_prepare_check => 1,
|
||||
|
||||
# we're happy to use local method if that's the same as the remote
|
||||
skip_default_methods => 1,
|
||||
|
||||
# ping is almost meaningless for DBD::Gofer and most transports anyway
|
||||
skip_ping => 1,
|
||||
|
||||
# don't update dbh attributes at all
|
||||
# XXX actually we currently need dbh_attribute_update for skip_default_methods to work
|
||||
# and skip_default_methods is more valuable to us than the cost of dbh_attribute_update
|
||||
dbh_attribute_update => 'none', # actually means 'first' currently
|
||||
#dbh_attribute_list => undef,
|
||||
|
||||
# we'd like to set locally_* but can't because drivers differ
|
||||
|
||||
# in a rush assume metadata doesn't change
|
||||
cache_tables => 1,
|
||||
cache_table_info => 1,
|
||||
cache_column_info => 1,
|
||||
cache_primary_key_info => 1,
|
||||
cache_foreign_key_info => 1,
|
||||
cache_statistics_info => 1,
|
||||
cache_get_info => 1,
|
||||
});
|
||||
|
||||
|
||||
1;
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBD::Gofer::Policy::rush - The 'rush' policy for DBD::Gofer
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
$dbh = DBI->connect("dbi:Gofer:transport=...;policy=rush", ...)
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
The C<rush> policy tries to make as few round-trips as possible.
|
||||
It's the opposite end of the policy spectrum to the C<pedantic> policy.
|
||||
|
||||
Temporary docs: See the source code for the list of policies and their defaults.
|
||||
|
||||
In a future version the policies and their defaults will be defined in the pod and parsed out at load-time.
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Tim Bunce, L<http://www.tim.bunce.name>
|
||||
|
||||
=head1 LICENCE AND COPYRIGHT
|
||||
|
||||
Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
|
||||
|
||||
This module is free software; you can redistribute it and/or
|
||||
modify it under the same terms as Perl itself. See L<perlartistic>.
|
||||
|
||||
=cut
|
||||
|
|
@ -0,0 +1,410 @@
|
|||
package DBD::Gofer::Transport::Base;
|
||||
|
||||
# $Id: Base.pm 14120 2010-06-07 19:52:19Z H.Merijn $
|
||||
#
|
||||
# Copyright (c) 2007, Tim Bunce, Ireland
|
||||
#
|
||||
# You may distribute under the terms of either the GNU General Public
|
||||
# License or the Artistic License, as specified in the Perl README file.
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use base qw(DBI::Gofer::Transport::Base);
|
||||
|
||||
our $VERSION = "0.014121";
|
||||
|
||||
__PACKAGE__->mk_accessors(qw(
|
||||
trace
|
||||
go_dsn
|
||||
go_url
|
||||
go_policy
|
||||
go_timeout
|
||||
go_retry_hook
|
||||
go_retry_limit
|
||||
go_cache
|
||||
cache_hit
|
||||
cache_miss
|
||||
cache_store
|
||||
));
|
||||
__PACKAGE__->mk_accessors_using(make_accessor_autoviv_hashref => qw(
|
||||
meta
|
||||
));
|
||||
|
||||
|
||||
sub new {
|
||||
my ($class, $args) = @_;
|
||||
$args->{$_} = 0 for (qw(cache_hit cache_miss cache_store));
|
||||
$args->{keep_meta_frozen} ||= 1 if $args->{go_cache};
|
||||
#warn "args @{[ %$args ]}\n";
|
||||
return $class->SUPER::new($args);
|
||||
}
|
||||
|
||||
|
||||
sub _init_trace { $ENV{DBD_GOFER_TRACE} || 0 }
|
||||
|
||||
|
||||
sub new_response {
|
||||
my $self = shift;
|
||||
return DBI::Gofer::Response->new(@_);
|
||||
}
|
||||
|
||||
|
||||
sub transmit_request {
|
||||
my ($self, $request) = @_;
|
||||
my $trace = $self->trace;
|
||||
my $response;
|
||||
|
||||
my ($go_cache, $request_cache_key);
|
||||
if ($go_cache = $self->{go_cache}) {
|
||||
$request_cache_key
|
||||
= $request->{meta}{request_cache_key}
|
||||
= $self->get_cache_key_for_request($request);
|
||||
if ($request_cache_key) {
|
||||
my $frozen_response = eval { $go_cache->get($request_cache_key) };
|
||||
if ($frozen_response) {
|
||||
$self->_dump("cached response found for ".ref($request), $request)
|
||||
if $trace;
|
||||
$response = $self->thaw_response($frozen_response);
|
||||
$self->trace_msg("transmit_request is returning a response from cache $go_cache\n")
|
||||
if $trace;
|
||||
++$self->{cache_hit};
|
||||
return $response;
|
||||
}
|
||||
warn $@ if $@;
|
||||
++$self->{cache_miss};
|
||||
$self->trace_msg("transmit_request cache miss\n")
|
||||
if $trace;
|
||||
}
|
||||
}
|
||||
|
||||
my $to = $self->go_timeout;
|
||||
my $transmit_sub = sub {
|
||||
$self->trace_msg("transmit_request\n") if $trace;
|
||||
local $SIG{ALRM} = sub { die "TIMEOUT\n" } if $to;
|
||||
|
||||
my $response = eval {
|
||||
local $SIG{PIPE} = sub {
|
||||
my $extra = ($! eq "Broken pipe") ? "" : " ($!)";
|
||||
die "Unable to send request: Broken pipe$extra\n";
|
||||
};
|
||||
alarm($to) if $to;
|
||||
$self->transmit_request_by_transport($request);
|
||||
};
|
||||
alarm(0) if $to;
|
||||
|
||||
if ($@) {
|
||||
return $self->transport_timedout("transmit_request", $to)
|
||||
if $@ eq "TIMEOUT\n";
|
||||
return $self->new_response({ err => 1, errstr => $@ });
|
||||
}
|
||||
|
||||
return $response;
|
||||
};
|
||||
|
||||
$response = $self->_transmit_request_with_retries($request, $transmit_sub);
|
||||
|
||||
if ($response) {
|
||||
my $frozen_response = delete $response->{meta}{frozen};
|
||||
$self->_store_response_in_cache($frozen_response, $request_cache_key)
|
||||
if $request_cache_key;
|
||||
}
|
||||
|
||||
$self->trace_msg("transmit_request is returning a response itself\n")
|
||||
if $trace && $response;
|
||||
|
||||
return $response unless wantarray;
|
||||
return ($response, $transmit_sub);
|
||||
}
|
||||
|
||||
|
||||
sub _transmit_request_with_retries {
|
||||
my ($self, $request, $transmit_sub) = @_;
|
||||
my $response;
|
||||
do {
|
||||
$response = $transmit_sub->();
|
||||
} while ( $response && $self->response_needs_retransmit($request, $response) );
|
||||
return $response;
|
||||
}
|
||||
|
||||
|
||||
sub receive_response {
|
||||
my ($self, $request, $retransmit_sub) = @_;
|
||||
my $to = $self->go_timeout;
|
||||
|
||||
my $receive_sub = sub {
|
||||
$self->trace_msg("receive_response\n");
|
||||
local $SIG{ALRM} = sub { die "TIMEOUT\n" } if $to;
|
||||
|
||||
my $response = eval {
|
||||
alarm($to) if $to;
|
||||
$self->receive_response_by_transport($request);
|
||||
};
|
||||
alarm(0) if $to;
|
||||
|
||||
if ($@) {
|
||||
return $self->transport_timedout("receive_response", $to)
|
||||
if $@ eq "TIMEOUT\n";
|
||||
return $self->new_response({ err => 1, errstr => $@ });
|
||||
}
|
||||
return $response;
|
||||
};
|
||||
|
||||
my $response;
|
||||
do {
|
||||
$response = $receive_sub->();
|
||||
if ($self->response_needs_retransmit($request, $response)) {
|
||||
$response = $self->_transmit_request_with_retries($request, $retransmit_sub);
|
||||
$response ||= $receive_sub->();
|
||||
}
|
||||
} while ( $self->response_needs_retransmit($request, $response) );
|
||||
|
||||
if ($response) {
|
||||
my $frozen_response = delete $response->{meta}{frozen};
|
||||
my $request_cache_key = $request->{meta}{request_cache_key};
|
||||
$self->_store_response_in_cache($frozen_response, $request_cache_key)
|
||||
if $request_cache_key && $self->{go_cache};
|
||||
}
|
||||
|
||||
return $response;
|
||||
}
|
||||
|
||||
|
||||
sub response_retry_preference {
|
||||
my ($self, $request, $response) = @_;
|
||||
|
||||
# give the user a chance to express a preference (or undef for default)
|
||||
if (my $go_retry_hook = $self->go_retry_hook) {
|
||||
my $retry = $go_retry_hook->($request, $response, $self);
|
||||
$self->trace_msg(sprintf "go_retry_hook returned %s\n",
|
||||
(defined $retry) ? $retry : 'undef');
|
||||
return $retry if defined $retry;
|
||||
}
|
||||
|
||||
# This is the main decision point. We don't retry requests that got
|
||||
# as far as executing because the error is probably from the database
|
||||
# (not transport) so retrying is unlikely to help. But note that any
|
||||
# severe transport error occurring after execute is likely to return
|
||||
# a new response object that doesn't have the execute flag set. Beware!
|
||||
return 0 if $response->executed_flag_set;
|
||||
|
||||
return 1 if ($response->errstr || '') =~ m/induced by DBI_GOFER_RANDOM/;
|
||||
|
||||
return 1 if $request->is_idempotent; # i.e. is SELECT or ReadOnly was set
|
||||
|
||||
return undef; # we couldn't make up our mind
|
||||
}
|
||||
|
||||
|
||||
sub response_needs_retransmit {
|
||||
my ($self, $request, $response) = @_;
|
||||
|
||||
my $err = $response->err
|
||||
or return 0; # nothing went wrong
|
||||
|
||||
my $retry = $self->response_retry_preference($request, $response);
|
||||
|
||||
if (!$retry) { # false or undef
|
||||
$self->trace_msg("response_needs_retransmit: response not suitable for retry\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
# we'd like to retry but have we retried too much already?
|
||||
|
||||
my $retry_limit = $self->go_retry_limit;
|
||||
if (!$retry_limit) {
|
||||
$self->trace_msg("response_needs_retransmit: retries disabled (retry_limit not set)\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
my $request_meta = $request->meta;
|
||||
my $retry_count = $request_meta->{retry_count} || 0;
|
||||
if ($retry_count >= $retry_limit) {
|
||||
$self->trace_msg("response_needs_retransmit: $retry_count is too many retries\n");
|
||||
# XXX should be possible to disable altering the err
|
||||
$response->errstr(sprintf "%s (after %d retries by gofer)", $response->errstr, $retry_count);
|
||||
return 0;
|
||||
}
|
||||
|
||||
# will retry now, do the admin
|
||||
++$retry_count;
|
||||
$self->trace_msg("response_needs_retransmit: retry $retry_count\n");
|
||||
|
||||
# hook so response_retry_preference can defer some code execution
|
||||
# until we've checked retry_count and retry_limit.
|
||||
if (ref $retry eq 'CODE') {
|
||||
$retry->($retry_count, $retry_limit)
|
||||
and warn "should return false"; # protect future use
|
||||
}
|
||||
|
||||
++$request_meta->{retry_count}; # update count for this request object
|
||||
++$self->meta->{request_retry_count}; # update cumulative transport stats
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
||||
sub transport_timedout {
|
||||
my ($self, $method, $timeout) = @_;
|
||||
$timeout ||= $self->go_timeout;
|
||||
return $self->new_response({ err => 1, errstr => "DBD::Gofer $method timed-out after $timeout seconds" });
|
||||
}
|
||||
|
||||
|
||||
# return undef if we don't want to cache this request
|
||||
# subclasses may use more specialized rules
|
||||
sub get_cache_key_for_request {
|
||||
my ($self, $request) = @_;
|
||||
|
||||
# we only want to cache idempotent requests
|
||||
# is_idempotent() is true if GOf_REQUEST_IDEMPOTENT or GOf_REQUEST_READONLY set
|
||||
return undef if not $request->is_idempotent;
|
||||
|
||||
# XXX would be nice to avoid the extra freeze here
|
||||
my $key = $self->freeze_request($request, undef, 1);
|
||||
|
||||
#use Digest::MD5; warn "get_cache_key_for_request: ".Digest::MD5::md5_base64($key)."\n";
|
||||
|
||||
return $key;
|
||||
}
|
||||
|
||||
|
||||
sub _store_response_in_cache {
|
||||
my ($self, $frozen_response, $request_cache_key) = @_;
|
||||
my $go_cache = $self->{go_cache}
|
||||
or return;
|
||||
|
||||
# new() ensures that enabling go_cache also enables keep_meta_frozen
|
||||
warn "No meta frozen in response" if !$frozen_response;
|
||||
warn "No request_cache_key" if !$request_cache_key;
|
||||
|
||||
if ($frozen_response && $request_cache_key) {
|
||||
$self->trace_msg("receive_response added response to cache $go_cache\n");
|
||||
eval { $go_cache->set($request_cache_key, $frozen_response) };
|
||||
warn $@ if $@;
|
||||
++$self->{cache_store};
|
||||
}
|
||||
}
|
||||
|
||||
1;
|
||||
|
||||
__END__
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBD::Gofer::Transport::Base - base class for DBD::Gofer client transports
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
my $remote_dsn = "..."
|
||||
DBI->connect("dbi:Gofer:transport=...;url=...;timeout=...;retry_limit=...;dsn=$remote_dsn",...)
|
||||
|
||||
or, enable by setting the DBI_AUTOPROXY environment variable:
|
||||
|
||||
export DBI_AUTOPROXY='dbi:Gofer:transport=...;url=...'
|
||||
|
||||
which will force I<all> DBI connections to be made via that Gofer server.
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
This is the base class for all DBD::Gofer client transports.
|
||||
|
||||
=head1 ATTRIBUTES
|
||||
|
||||
Gofer transport attributes can be specified either in the attributes parameter
|
||||
of the connect() method call, or in the DSN string. When used in the DSN
|
||||
string, attribute names don't have the C<go_> prefix.
|
||||
|
||||
=head2 go_dsn
|
||||
|
||||
The full DBI DSN that the Gofer server should connect to on your behalf.
|
||||
|
||||
When used in the DSN it must be the last element in the DSN string.
|
||||
|
||||
=head2 go_timeout
|
||||
|
||||
A time limit for sending a request and receiving a response. Some drivers may
|
||||
implement sending and receiving as separate steps, in which case (currently)
|
||||
the timeout applies to each separately.
|
||||
|
||||
If a request needs to be resent then the timeout is restarted for each sending
|
||||
of a request and receiving of a response.
|
||||
|
||||
=head2 go_retry_limit
|
||||
|
||||
The maximum number of times a request may be retried. The default is 2.
|
||||
|
||||
=head2 go_retry_hook
|
||||
|
||||
This subroutine reference is called, if defined, for each response received where $response->err is true.
|
||||
|
||||
The subroutine is passed three parameters: the request object, the response object, and the transport object.
|
||||
|
||||
If it returns an undefined value then the default retry behaviour is used. See L</RETRY ON ERROR> below.
|
||||
|
||||
If it returns a defined but false value then the request is not resent.
|
||||
|
||||
If it returns a true value then the request is resent, so long as the number of retries does not exceed C<go_retry_limit>.
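
A hedged example of installing a retry hook via the connect() attributes; the
"syntax error" rule is purely illustrative:

  my $dbh = DBI->connect ("dbi:Gofer:transport=null;dsn=dbi:NullP:", undef, undef, {
      go_retry_hook => sub {
          my ($request, $response, $transport) = @_;
          # hypothetical rule: never retry anything that looks like a syntax error
          return 0 if ($response->errstr || "") =~ m/syntax error/i;
          return undef;   # no preference: fall back to the default behaviour
      },
  });
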
|
||||
|
||||
=head1 RETRY ON ERROR
|
||||
|
||||
The default retry on error behaviour is:
|
||||
|
||||
- Retry if the error was due to DBI_GOFER_RANDOM. See L<DBI::Gofer::Execute>.
|
||||
|
||||
- Retry if $request->is_idempotent returns true. See L<DBI::Gofer::Request>.
|
||||
|
||||
A retry won't be allowed if the number of previous retries has reached C<go_retry_limit>.
|
||||
|
||||
=head1 TRACING
|
||||
|
||||
Tracing of gofer requests and responses can be enabled by setting the
|
||||
C<DBD_GOFER_TRACE> environment variable. A value of 1 gives a reasonably
|
||||
compact summary of each request and response. A value of 2 or more gives a
|
||||
detailed, and voluminous, dump.
|
||||
|
||||
The trace is written using DBI->trace_msg() and so is written to the default
|
||||
DBI trace output, which is usually STDERR.
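
For example, the variable can be set in the environment or early in the program:

  BEGIN { $ENV{DBD_GOFER_TRACE} = 1 }   # 1: compact summary, 2 or more: detailed dump
  use DBI;
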
|
||||
|
||||
=head1 METHODS
|
||||
|
||||
I<This section is currently far from complete.>
|
||||
|
||||
=head2 response_retry_preference
|
||||
|
||||
$retry = $transport->response_retry_preference($request, $response);
|
||||
|
||||
The response_retry_preference is called by DBD::Gofer when considering if a
|
||||
request should be retried after an error.
|
||||
|
||||
Returns true (would like to retry), false (must not retry), undef (no preference).
|
||||
|
||||
If a true value is returned in the form of a CODE ref then, if DBD::Gofer does
|
||||
decide to retry the request, it calls the code ref passing $retry_count, $retry_limit.
|
||||
This can be used for logging and/or to implement exponential backoff behaviour.
|
||||
Currently the called code must return using C<return;> to allow for future extensions.
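
A hedged sketch of using this mechanism for a simple exponential backoff,
installed via the C<go_retry_hook> attribute described above:

  go_retry_hook => sub {
      my ($request, $response, $transport) = @_;
      return sub {                     # a true value: we would like to retry
          my ($retry_count, $retry_limit) = @_;
          sleep 2 ** $retry_count;     # back off before the request is resent
          return;                      # must return an empty list
      };
  },
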
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Tim Bunce, L<http://www.tim.bunce.name>
|
||||
|
||||
=head1 LICENCE AND COPYRIGHT
|
||||
|
||||
Copyright (c) 2007-2008, Tim Bunce, Ireland. All rights reserved.
|
||||
|
||||
This module is free software; you can redistribute it and/or
|
||||
modify it under the same terms as Perl itself. See L<perlartistic>.
|
||||
|
||||
=head1 SEE ALSO
|
||||
|
||||
L<DBD::Gofer>, L<DBI::Gofer::Request>, L<DBI::Gofer::Response>, L<DBI::Gofer::Execute>.
|
||||
|
||||
and some example transports:
|
||||
|
||||
L<DBD::Gofer::Transport::stream>
|
||||
|
||||
L<DBD::Gofer::Transport::http>
|
||||
|
||||
L<DBI::Gofer::Transport::mod_perl>
|
||||
|
||||
=cut
|
|
@ -0,0 +1,144 @@
|
|||
package DBD::Gofer::Transport::corostream;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use Carp;
|
||||
|
||||
use Coro::Select; # a slow but coro-aware replacement for CORE::select (global effect!)
|
||||
|
||||
use Coro;
|
||||
use Coro::Handle;
|
||||
|
||||
use base qw(DBD::Gofer::Transport::stream);
|
||||
|
||||
# XXX ensure DBI_PUREPERL for parent doesn't pass to child
|
||||
sub start_pipe_command {
|
||||
local $ENV{DBI_PUREPERL} = $ENV{DBI_PUREPERL_COROCHILD}; # typically undef
|
||||
my $connection = shift->SUPER::start_pipe_command(@_);
|
||||
return $connection;
|
||||
}
|
||||
|
||||
|
||||
|
||||
1;
|
||||
|
||||
__END__
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBD::Gofer::Transport::corostream - Async DBD::Gofer stream transport using Coro and AnyEvent
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
DBI_AUTOPROXY="dbi:Gofer:transport=corostream" perl some-perl-script-using-dbi.pl
|
||||
|
||||
or
|
||||
|
||||
$dsn = ...; # the DSN for the driver and database you want to use
|
||||
$dbh = DBI->connect("dbi:Gofer:transport=corostream;dsn=$dsn", ...);
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
The I<BIG WIN> from using L<Coro> is that it enables the use of existing
|
||||
DBI frameworks like L<DBIx::Class>.
|
||||
|
||||
=head1 KNOWN ISSUES AND LIMITATIONS
|
||||
|
||||
- Uses Coro::Select so alters CORE::select globally
|
||||
Parent class probably needs refactoring to enable a more encapsulated approach.
|
||||
|
||||
- Doesn't prevent multiple concurrent requests
|
||||
Probably just needs a per-connection semaphore
|
||||
|
||||
- Coro has many caveats. Caveat emptor.
|
||||
|
||||
=head1 STATUS
|
||||
|
||||
THIS IS CURRENTLY JUST A PROOF-OF-CONCEPT IMPLEMENTATION FOR EXPERIMENTATION.
|
||||
|
||||
Please note that I have no plans to develop this code further myself.
|
||||
I'd very much welcome contributions. Interested? Let me know!
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Tim Bunce, L<http://www.tim.bunce.name>
|
||||
|
||||
=head1 LICENCE AND COPYRIGHT
|
||||
|
||||
Copyright (c) 2010, Tim Bunce, Ireland. All rights reserved.
|
||||
|
||||
This module is free software; you can redistribute it and/or
|
||||
modify it under the same terms as Perl itself. See L<perlartistic>.
|
||||
|
||||
=head1 SEE ALSO
|
||||
|
||||
L<DBD::Gofer::Transport::stream>
|
||||
|
||||
L<DBD::Gofer>
|
||||
|
||||
=head1 APPENDIX
|
||||
|
||||
Example code:
|
||||
|
||||
#!perl
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
use Time::HiRes qw(time);
|
||||
|
||||
BEGIN { $ENV{PERL_ANYEVENT_STRICT} = 1; $ENV{PERL_ANYEVENT_VERBOSE} = 1; }
|
||||
|
||||
use AnyEvent;
|
||||
|
||||
BEGIN { $ENV{DBI_TRACE} = 0; $ENV{DBI_GOFER_TRACE} = 0; $ENV{DBD_GOFER_TRACE} = 0; };
|
||||
|
||||
use DBI;
|
||||
|
||||
$ENV{DBI_AUTOPROXY} = 'dbi:Gofer:transport=corostream';
|
||||
|
||||
my $ticker = AnyEvent->timer( after => 0, interval => 0.1, cb => sub {
|
||||
warn sprintf "-tick- %.2f\n", time
|
||||
} );
|
||||
|
||||
warn "connecting...\n";
|
||||
my $dbh = DBI->connect("dbi:NullP:");
|
||||
warn "...connected\n";
|
||||
|
||||
for (1..3) {
|
||||
warn "entering DBI...\n";
|
||||
$dbh->do("sleep 0.3"); # pseudo-sql understood by the DBD::NullP driver
|
||||
warn "...returned\n";
|
||||
}
|
||||
|
||||
warn "done.";
|
||||
|
||||
Example output:
|
||||
|
||||
$ perl corogofer.pl
|
||||
connecting...
|
||||
-tick- 1293631437.14
|
||||
-tick- 1293631437.14
|
||||
...connected
|
||||
entering DBI...
|
||||
-tick- 1293631437.25
|
||||
-tick- 1293631437.35
|
||||
-tick- 1293631437.45
|
||||
-tick- 1293631437.55
|
||||
...returned
|
||||
entering DBI...
|
||||
-tick- 1293631437.66
|
||||
-tick- 1293631437.76
|
||||
-tick- 1293631437.86
|
||||
...returned
|
||||
entering DBI...
|
||||
-tick- 1293631437.96
|
||||
-tick- 1293631438.06
|
||||
-tick- 1293631438.16
|
||||
...returned
|
||||
done. at corogofer.pl line 39.
|
||||
|
||||
You can see that the timer callback is firing while the code 'waits' inside the
|
||||
do() method for the response from the database. Normally that would block.
|
||||
|
||||
=cut
|
|
@ -0,0 +1,111 @@
|
|||
package DBD::Gofer::Transport::null;
|
||||
|
||||
# $Id: null.pm 10087 2007-10-16 12:42:37Z Tim $
|
||||
#
|
||||
# Copyright (c) 2007, Tim Bunce, Ireland
|
||||
#
|
||||
# You may distribute under the terms of either the GNU General Public
|
||||
# License or the Artistic License, as specified in the Perl README file.
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use base qw(DBD::Gofer::Transport::Base);
|
||||
|
||||
use DBI::Gofer::Execute;
|
||||
|
||||
our $VERSION = "0.010088";
|
||||
|
||||
__PACKAGE__->mk_accessors(qw(
|
||||
pending_response
|
||||
transmit_count
|
||||
));
|
||||
|
||||
my $executor = DBI::Gofer::Execute->new();
|
||||
|
||||
|
||||
sub transmit_request_by_transport {
|
||||
my ($self, $request) = @_;
|
||||
$self->transmit_count( ($self->transmit_count()||0) + 1 ); # just for tests
|
||||
|
||||
my $frozen_request = $self->freeze_request($request);
|
||||
|
||||
# ...
|
||||
# the request is magically transported over to ... ourselves
|
||||
# ...
|
||||
|
||||
my $response = $executor->execute_request( $self->thaw_request($frozen_request, undef, 1) );
|
||||
|
||||
# put response 'on the shelf' ready for receive_response()
|
||||
$self->pending_response( $response );
|
||||
|
||||
return undef;
|
||||
}
|
||||
|
||||
|
||||
sub receive_response_by_transport {
|
||||
my $self = shift;
|
||||
|
||||
my $response = $self->pending_response;
|
||||
|
||||
my $frozen_response = $self->freeze_response($response, undef, 1);
|
||||
|
||||
# ...
|
||||
# the response is magically transported back to ... ourselves
|
||||
# ...
|
||||
|
||||
return $self->thaw_response($frozen_response);
|
||||
}
|
||||
|
||||
|
||||
1;
|
||||
__END__
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBD::Gofer::Transport::null - DBD::Gofer client transport for testing
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
my $original_dsn = "..."
|
||||
DBI->connect("dbi:Gofer:transport=null;dsn=$original_dsn",...)
|
||||
|
||||
or, enable by setting the DBI_AUTOPROXY environment variable:
|
||||
|
||||
export DBI_AUTOPROXY="dbi:Gofer:transport=null"
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
Connect via DBD::Gofer but execute the requests within the same process.
|
||||
|
||||
This is a quick and simple way to test applications for compatibility with the
|
||||
(few) restrictions that DBD::Gofer imposes.
|
||||
|
||||
It also provides a simple, portable way for the DBI test suite to be used to
|
||||
test DBD::Gofer on all platforms with no setup.
|
||||
|
||||
Also, by measuring the difference in performance between normal connections and
|
||||
connections via C<dbi:Gofer:transport=null> the basic cost of using DBD::Gofer
|
||||
can be measured. Furthermore, the additional cost of more advanced transports can be
|
||||
isolated by comparing their performance with the null transport.
|
||||
|
||||
The C<t/85gofer.t> script in the DBI distribution includes a comparative benchmark.
|
||||
|
||||
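
A minimal sketch of such a comparison, separate from C<t/85gofer.t> and
using only the DBD::NullP driver bundled with DBI (the statement text and
iteration count are illustrative only):

    use strict;
    use warnings;
    use DBI;
    use Time::HiRes qw(time);

    my $direct = DBI->connect("dbi:NullP:", undef, undef, { RaiseError => 1 });
    my $gofer  = DBI->connect("dbi:Gofer:transport=null;dsn=dbi:NullP:",
                              undef, undef, { RaiseError => 1 });

    for my $pair ([ direct => $direct ], [ gofer_null => $gofer ]) {
        my ($name, $dbh) = @$pair;
        my $t0 = time();
        $dbh->do("noop") for 1 .. 1000;  # DBD::NullP ignores unknown statements
        printf "%-10s %.3fs for 1000 do() calls\n", $name, time() - $t0;
    }
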
=head1 AUTHOR
|
||||
|
||||
Tim Bunce, L<http://www.tim.bunce.name>
|
||||
|
||||
=head1 LICENCE AND COPYRIGHT
|
||||
|
||||
Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
|
||||
|
||||
This module is free software; you can redistribute it and/or
|
||||
modify it under the same terms as Perl itself. See L<perlartistic>.
|
||||
|
||||
=head1 SEE ALSO
|
||||
|
||||
L<DBD::Gofer::Transport::Base>
|
||||
|
||||
L<DBD::Gofer>
|
||||
|
||||
=cut
|
|
@ -0,0 +1,253 @@
|
|||
package DBD::Gofer::Transport::pipeone;
|
||||
|
||||
# $Id: pipeone.pm 10087 2007-10-16 12:42:37Z Tim $
|
||||
#
|
||||
# Copyright (c) 2007, Tim Bunce, Ireland
|
||||
#
|
||||
# You may distribute under the terms of either the GNU General Public
|
||||
# License or the Artistic License, as specified in the Perl README file.
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use Carp;
|
||||
use Fcntl;
|
||||
use IO::Select;
|
||||
use IPC::Open3 qw(open3);
|
||||
use Symbol qw(gensym);
|
||||
|
||||
use base qw(DBD::Gofer::Transport::Base);
|
||||
|
||||
our $VERSION = "0.010088";
|
||||
|
||||
__PACKAGE__->mk_accessors(qw(
|
||||
connection_info
|
||||
go_perl
|
||||
));
|
||||
|
||||
|
||||
sub new {
|
||||
my ($self, $args) = @_;
|
||||
$args->{go_perl} ||= do {
|
||||
($INC{"blib.pm"}) ? [ $^X, '-Mblib' ] : [ $^X ];
|
||||
};
|
||||
if (not ref $args->{go_perl}) {
|
||||
# user can override the perl to be used, either with an array ref
|
||||
# containing the command name and args to use, or with a string
|
||||
# (ie via the DSN) in which case, to enable args to be passed,
|
||||
# we split on two or more consecutive spaces (otherwise the path
|
||||
# to perl couldn't contain a space itself).
|
||||
$args->{go_perl} = [ split /\s{2,}/, $args->{go_perl} ];
|
||||
}
|
||||
return $self->SUPER::new($args);
|
||||
}
|
||||
|
||||
|
||||
# nonblock($fh) puts filehandle into nonblocking mode
|
||||
sub nonblock {
|
||||
my $fh = shift;
|
||||
my $flags = fcntl($fh, F_GETFL, 0)
|
||||
or croak "Can't get flags for filehandle $fh: $!";
|
||||
fcntl($fh, F_SETFL, $flags | O_NONBLOCK)
|
||||
or croak "Can't make filehandle $fh nonblocking: $!";
|
||||
}
|
||||
|
||||
|
||||
sub start_pipe_command {
|
||||
my ($self, $cmd) = @_;
|
||||
$cmd = [ $cmd ] unless ref $cmd eq 'ARRAY';
|
||||
|
||||
# if it's important that the subprocess uses the same
|
||||
# (versions of) modules as us then the caller should
|
||||
# set PERL5LIB itself.
|
||||
|
||||
# limit various forms of insanity, for now
|
||||
local $ENV{DBI_TRACE}; # use DBI_GOFER_TRACE instead
|
||||
local $ENV{DBI_AUTOPROXY};
|
||||
local $ENV{DBI_PROFILE};
|
||||
|
||||
my ($wfh, $rfh, $efh) = (gensym, gensym, gensym);
|
||||
my $pid = open3($wfh, $rfh, $efh, @$cmd)
|
||||
or die "error starting @$cmd: $!\n";
|
||||
if ($self->trace) {
|
||||
$self->trace_msg(sprintf("Started pid $pid: @$cmd {fd: w%d r%d e%d, ppid=$$}\n", fileno $wfh, fileno $rfh, fileno $efh),0);
|
||||
}
|
||||
nonblock($rfh);
|
||||
nonblock($efh);
|
||||
my $ios = IO::Select->new($rfh, $efh);
|
||||
|
||||
return {
|
||||
cmd=>$cmd,
|
||||
pid=>$pid,
|
||||
wfh=>$wfh, rfh=>$rfh, efh=>$efh,
|
||||
ios=>$ios,
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
sub cmd_as_string {
|
||||
my $self = shift;
|
||||
# XXX meant to return a properly shell-escaped string suitable for system
|
||||
    # but it's only for debugging so that can wait
|
||||
my $connection_info = $self->connection_info;
|
||||
return join " ", map { (m/^[-:\w]*$/) ? $_ : "'$_'" } @{$connection_info->{cmd}};
|
||||
}
|
||||
|
||||
|
||||
sub transmit_request_by_transport {
|
||||
my ($self, $request) = @_;
|
||||
|
||||
my $frozen_request = $self->freeze_request($request);
|
||||
|
||||
my $cmd = [ @{$self->go_perl}, qw(-MDBI::Gofer::Transport::pipeone -e run_one_stdio)];
|
||||
my $info = $self->start_pipe_command($cmd);
|
||||
|
||||
my $wfh = delete $info->{wfh};
|
||||
# send frozen request
|
||||
local $\;
|
||||
print $wfh $frozen_request
|
||||
or warn "error writing to @$cmd: $!\n";
|
||||
# indicate that there's no more
|
||||
close $wfh
|
||||
or die "error closing pipe to @$cmd: $!\n";
|
||||
|
||||
$self->connection_info( $info );
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
sub read_response_from_fh {
|
||||
my ($self, $fh_actions) = @_;
|
||||
my $trace = $self->trace;
|
||||
|
||||
my $info = $self->connection_info || die;
|
||||
my ($ios) = @{$info}{qw(ios)};
|
||||
my $errors = 0;
|
||||
my $complete;
|
||||
|
||||
die "No handles to read response from" unless $ios->count;
|
||||
|
||||
while ($ios->count) {
|
||||
my @readable = $ios->can_read();
|
||||
for my $fh (@readable) {
|
||||
local $_;
|
||||
my $actions = $fh_actions->{$fh} || die "panic: no action for $fh";
|
||||
my $rv = sysread($fh, $_='', 1024*31); # to fit in 32KB slab
|
||||
unless ($rv) { # error (undef) or end of file (0)
|
||||
my $action;
|
||||
unless (defined $rv) { # was an error
|
||||
$self->trace_msg("error on handle $fh: $!\n") if $trace >= 4;
|
||||
$action = $actions->{error} || $actions->{eof};
|
||||
++$errors;
|
||||
                    # XXX an error may be a permanent condition of the handle
|
||||
# if so we'll loop here - not good
|
||||
}
|
||||
else {
|
||||
$action = $actions->{eof};
|
||||
$self->trace_msg("eof on handle $fh\n") if $trace >= 4;
|
||||
}
|
||||
if ($action->($fh)) {
|
||||
$self->trace_msg("removing $fh from handle set\n") if $trace >= 4;
|
||||
$ios->remove($fh);
|
||||
}
|
||||
next;
|
||||
}
|
||||
# action returns true if the response is now complete
|
||||
            # (we finish reading from all the currently readable handles first)
|
||||
$actions->{read}->($fh) && ++$complete;
|
||||
}
|
||||
last if $complete;
|
||||
}
|
||||
return $errors;
|
||||
}
|
||||
|
||||
|
||||
sub receive_response_by_transport {
|
||||
my $self = shift;
|
||||
|
||||
my $info = $self->connection_info || die;
|
||||
my ($pid, $rfh, $efh, $ios, $cmd) = @{$info}{qw(pid rfh efh ios cmd)};
|
||||
|
||||
my $frozen_response;
|
||||
my $stderr_msg;
|
||||
|
||||
$self->read_response_from_fh( {
|
||||
$efh => {
|
||||
error => sub { warn "error reading response stderr: $!"; 1 },
|
||||
eof => sub { warn "eof on stderr" if 0; 1 },
|
||||
read => sub { $stderr_msg .= $_; 0 },
|
||||
},
|
||||
$rfh => {
|
||||
error => sub { warn "error reading response: $!"; 1 },
|
||||
eof => sub { warn "eof on stdout" if 0; 1 },
|
||||
read => sub { $frozen_response .= $_; 0 },
|
||||
},
|
||||
});
|
||||
|
||||
waitpid $info->{pid}, 0
|
||||
or warn "waitpid: $!"; # XXX do something more useful?
|
||||
|
||||
die ref($self)." command (@$cmd) failed: $stderr_msg"
|
||||
if not $frozen_response; # no output on stdout at all
|
||||
|
||||
# XXX need to be able to detect and deal with corruption
|
||||
my $response = $self->thaw_response($frozen_response);
|
||||
|
||||
if ($stderr_msg) {
|
||||
# add stderr messages as warnings (for PrintWarn)
|
||||
$response->add_err(0, $stderr_msg, undef, $self->trace)
|
||||
# but ignore warning from old version of blib
|
||||
unless $stderr_msg =~ /^Using .*blib/ && "@$cmd" =~ /-Mblib/;
|
||||
}
|
||||
|
||||
return $response;
|
||||
}
|
||||
|
||||
|
||||
1;
|
||||
|
||||
__END__
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBD::Gofer::Transport::pipeone - DBD::Gofer client transport for testing
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
$original_dsn = "...";
|
||||
DBI->connect("dbi:Gofer:transport=pipeone;dsn=$original_dsn",...)
|
||||
|
||||
or, enable by setting the DBI_AUTOPROXY environment variable:
|
||||
|
||||
export DBI_AUTOPROXY="dbi:Gofer:transport=pipeone"
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
Connect via DBD::Gofer and execute each request by starting a new subprocess.
|
||||
|
||||
This is, as you might imagine, spectacularly inefficient!
|
||||
|
||||
It's only intended for testing. Specifically it demonstrates that the server
|
||||
side is completely stateless.
|
||||
|
||||
It also provides a base class for the much more useful L<DBD::Gofer::Transport::stream>
|
||||
transport.
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Tim Bunce, L<http://www.tim.bunce.name>
|
||||
|
||||
=head1 LICENCE AND COPYRIGHT
|
||||
|
||||
Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
|
||||
|
||||
This module is free software; you can redistribute it and/or
|
||||
modify it under the same terms as Perl itself. See L<perlartistic>.
|
||||
|
||||
=head1 SEE ALSO
|
||||
|
||||
L<DBD::Gofer::Transport::Base>
|
||||
|
||||
L<DBD::Gofer>
|
||||
|
||||
=cut
|
|
@ -0,0 +1,292 @@
|
|||
package DBD::Gofer::Transport::stream;
|
||||
|
||||
# $Id: stream.pm 14598 2010-12-21 22:53:25Z Tim $
|
||||
#
|
||||
# Copyright (c) 2007, Tim Bunce, Ireland
|
||||
#
|
||||
# You may distribute under the terms of either the GNU General Public
|
||||
# License or the Artistic License, as specified in the Perl README file.
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use Carp;
|
||||
|
||||
use base qw(DBD::Gofer::Transport::pipeone);
|
||||
|
||||
our $VERSION = "0.014599";
|
||||
|
||||
__PACKAGE__->mk_accessors(qw(
|
||||
go_persist
|
||||
));
|
||||
|
||||
my $persist_all = 5;
|
||||
my %persist;
|
||||
|
||||
|
||||
sub _connection_key {
|
||||
my ($self) = @_;
|
||||
return join "~", $self->go_url||"", @{ $self->go_perl || [] };
|
||||
}
|
||||
|
||||
|
||||
sub _connection_get {
|
||||
my ($self) = @_;
|
||||
|
||||
my $persist = $self->go_persist; # = 0 can force non-caching
|
||||
$persist = $persist_all if not defined $persist;
|
||||
my $key = ($persist) ? $self->_connection_key : '';
|
||||
if ($persist{$key} && $self->_connection_check($persist{$key})) {
|
||||
$self->trace_msg("reusing persistent connection $key\n",0) if $self->trace >= 1;
|
||||
return $persist{$key};
|
||||
}
|
||||
|
||||
my $connection = $self->_make_connection;
|
||||
|
||||
if ($key) {
|
||||
%persist = () if keys %persist > $persist_all; # XXX quick hack to limit subprocesses
|
||||
$persist{$key} = $connection;
|
||||
}
|
||||
|
||||
return $connection;
|
||||
}
|
||||
|
||||
|
||||
sub _connection_check {
|
||||
my ($self, $connection) = @_;
|
||||
$connection ||= $self->connection_info;
|
||||
my $pid = $connection->{pid};
|
||||
my $ok = (kill 0, $pid);
|
||||
$self->trace_msg("_connection_check: $ok (pid $$)\n",0) if $self->trace;
|
||||
return $ok;
|
||||
}
|
||||
|
||||
|
||||
sub _connection_kill {
|
||||
my ($self) = @_;
|
||||
my $connection = $self->connection_info;
|
||||
my ($pid, $wfh, $rfh, $efh) = @{$connection}{qw(pid wfh rfh efh)};
|
||||
$self->trace_msg("_connection_kill: closing write handle\n",0) if $self->trace;
|
||||
# closing the write file handle should be enough, generally
|
||||
close $wfh;
|
||||
# in future we may want to be more aggressive
|
||||
#close $rfh; close $efh; kill 15, $pid
|
||||
# but deleting from the persist cache...
|
||||
delete $persist{ $self->_connection_key };
|
||||
# ... and removing the connection_info should suffice
|
||||
$self->connection_info( undef );
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
sub _make_connection {
|
||||
my ($self) = @_;
|
||||
|
||||
my $go_perl = $self->go_perl;
|
||||
my $cmd = [ @$go_perl, qw(-MDBI::Gofer::Transport::stream -e run_stdio_hex)];
|
||||
|
||||
#push @$cmd, "DBI_TRACE=2=/tmp/goferstream.log", "sh", "-c";
|
||||
if (my $url = $self->go_url) {
|
||||
die "Only 'ssh:user\@host' style url supported by this transport"
|
||||
unless $url =~ s/^ssh://;
|
||||
my $ssh = $url;
|
||||
my $setup_env = join "||", map { "source $_ 2>/dev/null" } qw(.bash_profile .bash_login .profile);
|
||||
my $setup = $setup_env.q{; exec "$@"};
|
||||
# don't use $^X on remote system by default as it's possibly wrong
|
||||
$cmd->[0] = 'perl' if "@$go_perl" eq $^X;
|
||||
# -x not only 'Disables X11 forwarding' but also makes connections *much* faster
|
||||
unshift @$cmd, qw(ssh -xq), split(' ', $ssh), qw(bash -c), $setup;
|
||||
}
|
||||
|
||||
$self->trace_msg("new connection: @$cmd\n",0) if $self->trace;
|
||||
|
||||
# XXX add a handshake - some message from DBI::Gofer::Transport::stream that's
|
||||
# sent as soon as it starts that we can wait for to report success - and soak up
|
||||
# and report useful warnings etc from ssh before we get it? Increases latency though.
|
||||
my $connection = $self->start_pipe_command($cmd);
|
||||
return $connection;
|
||||
}
|
||||
|
||||
|
||||
sub transmit_request_by_transport {
|
||||
my ($self, $request) = @_;
|
||||
my $trace = $self->trace;
|
||||
|
||||
my $connection = $self->connection_info || do {
|
||||
my $con = $self->_connection_get;
|
||||
$self->connection_info( $con );
|
||||
$con;
|
||||
};
|
||||
|
||||
my $encoded_request = unpack("H*", $self->freeze_request($request));
|
||||
$encoded_request .= "\015\012";
|
||||
|
||||
my $wfh = $connection->{wfh};
|
||||
$self->trace_msg(sprintf("transmit_request_by_transport: to fh %s fd%d\n", $wfh, fileno($wfh)),0)
|
||||
if $trace >= 4;
|
||||
|
||||
# send frozen request
|
||||
local $\;
|
||||
$wfh->print($encoded_request) # autoflush enabled
|
||||
or do {
|
||||
my $err = $!;
|
||||
# XXX could/should make new connection and retry
|
||||
$self->_connection_kill;
|
||||
die "Error sending request: $err";
|
||||
};
|
||||
$self->trace_msg("Request sent: $encoded_request\n",0) if $trace >= 4;
|
||||
|
||||
return undef; # indicate no response yet (so caller calls receive_response_by_transport)
|
||||
}
|
||||
|
||||
|
||||
sub receive_response_by_transport {
|
||||
my $self = shift;
|
||||
my $trace = $self->trace;
|
||||
|
||||
$self->trace_msg("receive_response_by_transport: awaiting response\n",0) if $trace >= 4;
|
||||
my $connection = $self->connection_info || die;
|
||||
my ($pid, $rfh, $efh, $cmd) = @{$connection}{qw(pid rfh efh cmd)};
|
||||
|
||||
my $errno = 0;
|
||||
my $encoded_response;
|
||||
my $stderr_msg;
|
||||
|
||||
$self->read_response_from_fh( {
|
||||
$efh => {
|
||||
error => sub { warn "error reading response stderr: $!"; $errno||=$!; 1 },
|
||||
eof => sub { warn "eof reading efh" if $trace >= 4; 1 },
|
||||
read => sub { $stderr_msg .= $_; 0 },
|
||||
},
|
||||
$rfh => {
|
||||
error => sub { warn "error reading response: $!"; $errno||=$!; 1 },
|
||||
eof => sub { warn "eof reading rfh" if $trace >= 4; 1 },
|
||||
read => sub { $encoded_response .= $_; ($encoded_response=~s/\015\012$//) ? 1 : 0 },
|
||||
},
|
||||
});
|
||||
|
||||
# if we got no output on stdout at all then the command has
|
||||
# probably exited, possibly with an error to stderr.
|
||||
# Turn this situation into a reasonably useful DBI error.
|
||||
if (not $encoded_response) {
|
||||
my @msg;
|
||||
push @msg, "error while reading response: $errno" if $errno;
|
||||
if ($stderr_msg) {
|
||||
chomp $stderr_msg;
|
||||
push @msg, sprintf "error reported by \"%s\" (pid %d%s): %s",
|
||||
$self->cmd_as_string,
|
||||
$pid, ((kill 0, $pid) ? "" : ", exited"),
|
||||
$stderr_msg;
|
||||
}
|
||||
die join(", ", "No response received", @msg)."\n";
|
||||
}
|
||||
|
||||
$self->trace_msg("Response received: $encoded_response\n",0)
|
||||
if $trace >= 4;
|
||||
|
||||
$self->trace_msg("Gofer stream stderr message: $stderr_msg\n",0)
|
||||
if $stderr_msg && $trace;
|
||||
|
||||
my $frozen_response = pack("H*", $encoded_response);
|
||||
|
||||
# XXX need to be able to detect and deal with corruption
|
||||
my $response = $self->thaw_response($frozen_response);
|
||||
|
||||
if ($stderr_msg) {
|
||||
# add stderr messages as warnings (for PrintWarn)
|
||||
$response->add_err(0, $stderr_msg, undef, $trace)
|
||||
# but ignore warning from old version of blib
|
||||
unless $stderr_msg =~ /^Using .*blib/ && "@$cmd" =~ /-Mblib/;
|
||||
}
|
||||
|
||||
return $response;
|
||||
}
|
||||
|
||||
sub transport_timedout {
|
||||
my $self = shift;
|
||||
$self->_connection_kill;
|
||||
return $self->SUPER::transport_timedout(@_);
|
||||
}
|
||||
|
||||
1;
|
||||
|
||||
__END__
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBD::Gofer::Transport::stream - DBD::Gofer transport for stdio streaming
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
DBI->connect('dbi:Gofer:transport=stream;url=ssh:username@host.example.com;dsn=dbi:...',...)
|
||||
|
||||
or, enable by setting the DBI_AUTOPROXY environment variable:
|
||||
|
||||
export DBI_AUTOPROXY='dbi:Gofer:transport=stream;url=ssh:username@host.example.com'
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
Without the C<url=> parameter it launches a subprocess as
|
||||
|
||||
perl -MDBI::Gofer::Transport::stream -e run_stdio_hex
|
||||
|
||||
and feeds requests into it and reads responses from it. But that's not very useful.
|
||||
|
||||
With a C<url=ssh:username@host.example.com> parameter it uses ssh to launch the subprocess
|
||||
on a remote system. That's much more useful!
|
||||
|
||||
It gives you secure remote access to DBI databases on any system you can login to.
|
||||
Using ssh also gives you optional compression and many other features (see the
|
||||
ssh manual for how to configure that and many other options via ~/.ssh/config file).
|
||||
|
||||
The actual command invoked is something like:
|
||||
|
||||
ssh -xq ssh:username@host.example.com bash -c $setup $run
|
||||
|
||||
where $run is the command shown above, and $setup is
|
||||
|
||||
. .bash_profile 2>/dev/null || . .bash_login 2>/dev/null || . .profile 2>/dev/null; exec "$@"
|
||||
|
||||
which is trying (in a limited and fairly unportable way) to set up the environment
|
||||
(PATH, PERL5LIB etc) as it would be if you had logged in to that system.
|
||||
|
||||
The "C<perl>" used in the command will default to the value of $^X when not using ssh.
|
||||
On most systems that's the full path to the perl that's currently executing.
|
||||
|
||||
|
||||
=head1 PERSISTENCE
|
||||
|
||||
Currently gofer stream connections persist (remain connected) after all
|
||||
database handles have been disconnected. This makes later connections in the
|
||||
same process very fast.
|
||||
|
||||
Currently up to 5 different gofer stream connections (based on url) can
|
||||
persist. If more than 5 are in the cache when a new connection is made then
|
||||
the cache is cleared before adding the new connection. Simple but effective.
|
||||
|
||||
=head1 TO DO
|
||||
|
||||
Document go_perl attribute
|
||||
|
||||
Automatically reconnect (within reason) if there's a transport error.
|
||||
|
||||
Decide on default for persistent connection - on or off? limits? ttl?
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Tim Bunce, L<http://www.tim.bunce.name>
|
||||
|
||||
=head1 LICENCE AND COPYRIGHT
|
||||
|
||||
Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
|
||||
|
||||
This module is free software; you can redistribute it and/or
|
||||
modify it under the same terms as Perl itself. See L<perlartistic>.
|
||||
|
||||
=head1 SEE ALSO
|
||||
|
||||
L<DBD::Gofer::Transport::Base>
|
||||
|
||||
L<DBD::Gofer>
|
||||
|
||||
=cut
|
|
@ -0,0 +1,376 @@
|
|||
# -*- perl -*-
|
||||
#
|
||||
# DBD::Mem - A DBI driver for in-memory tables
|
||||
#
|
||||
# This module is currently maintained by
|
||||
#
|
||||
# Jens Rehsack
|
||||
#
|
||||
# Copyright (C) 2016,2017 by Jens Rehsack
|
||||
#
|
||||
# All rights reserved.
|
||||
#
|
||||
# You may distribute this module under the terms of either the GNU
|
||||
# General Public License or the Artistic License, as specified in
|
||||
# the Perl README file.
|
||||
|
||||
require 5.008;
|
||||
use strict;
|
||||
|
||||
#################
|
||||
package DBD::Mem;
|
||||
#################
|
||||
use base qw( DBI::DBD::SqlEngine );
|
||||
use vars qw($VERSION $ATTRIBUTION $drh);
|
||||
$VERSION = '0.001';
|
||||
$ATTRIBUTION = 'DBD::Mem by Jens Rehsack';
|
||||
|
||||
# no need to have driver() unless you need private methods
|
||||
#
|
||||
sub driver ($;$)
|
||||
{
|
||||
my ( $class, $attr ) = @_;
|
||||
return $drh if ($drh);
|
||||
|
||||
# do the real work in DBI::DBD::SqlEngine
|
||||
#
|
||||
$attr->{Attribution} = 'DBD::Mem by Jens Rehsack';
|
||||
$drh = $class->SUPER::driver($attr);
|
||||
|
||||
return $drh;
|
||||
}
|
||||
|
||||
sub CLONE
|
||||
{
|
||||
undef $drh;
|
||||
}
|
||||
|
||||
#####################
|
||||
package DBD::Mem::dr;
|
||||
#####################
|
||||
$DBD::Mem::dr::imp_data_size = 0;
|
||||
@DBD::Mem::dr::ISA = qw(DBI::DBD::SqlEngine::dr);
|
||||
|
||||
# you could put some :dr private methods here
|
||||
|
||||
# you may need to over-ride some DBI::DBD::SqlEngine::dr methods here
|
||||
# but you can probably get away with just letting it do the work
|
||||
# in most cases
|
||||
|
||||
#####################
|
||||
package DBD::Mem::db;
|
||||
#####################
|
||||
$DBD::Mem::db::imp_data_size = 0;
|
||||
@DBD::Mem::db::ISA = qw(DBI::DBD::SqlEngine::db);
|
||||
|
||||
use Carp qw/carp/;
|
||||
|
||||
sub set_versions
|
||||
{
|
||||
my $this = $_[0];
|
||||
$this->{mem_version} = $DBD::Mem::VERSION;
|
||||
return $this->SUPER::set_versions();
|
||||
}
|
||||
|
||||
sub init_valid_attributes
|
||||
{
|
||||
my $dbh = shift;
|
||||
|
||||
# define valid private attributes
|
||||
#
|
||||
# attempts to set non-valid attrs in connect() or
|
||||
# with $dbh->{attr} will throw errors
|
||||
#
|
||||
# the attrs here *must* start with mem_ or foo_
|
||||
#
|
||||
# see the STORE methods below for how to check these attrs
|
||||
#
|
||||
$dbh->{mem_valid_attrs} = {
|
||||
mem_version => 1, # verbose DBD::Mem version
|
||||
mem_valid_attrs => 1, # DBD::Mem::db valid attrs
|
||||
mem_readonly_attrs => 1, # DBD::Mem::db r/o attrs
|
||||
mem_meta => 1, # DBD::Mem public access for f_meta
|
||||
mem_tables => 1, # DBD::Mem public access for f_meta
|
||||
};
|
||||
$dbh->{mem_readonly_attrs} = {
|
||||
mem_version => 1, # verbose DBD::Mem version
|
||||
mem_valid_attrs => 1, # DBD::Mem::db valid attrs
|
||||
mem_readonly_attrs => 1, # DBD::Mem::db r/o attrs
|
||||
mem_meta => 1, # DBD::Mem public access for f_meta
|
||||
};
|
||||
|
||||
$dbh->{mem_meta} = "mem_tables";
|
||||
|
||||
return $dbh->SUPER::init_valid_attributes();
|
||||
}
|
||||
|
||||
sub get_mem_versions
|
||||
{
|
||||
my ( $dbh, $table ) = @_;
|
||||
$table ||= '';
|
||||
|
||||
my $meta;
|
||||
my $class = $dbh->{ImplementorClass};
|
||||
$class =~ s/::db$/::Table/;
|
||||
$table and ( undef, $meta ) = $class->get_table_meta( $dbh, $table, 1 );
|
||||
$meta or ( $meta = {} and $class->bootstrap_table_meta( $dbh, $meta, $table ) );
|
||||
|
||||
return sprintf( "%s using %s", $dbh->{mem_version}, $AnyData2::VERSION );
|
||||
}
|
||||
|
||||
package DBD::Mem::st;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
our $imp_data_size = 0;
|
||||
our @ISA = qw(DBI::DBD::SqlEngine::st);
|
||||
|
||||
############################
|
||||
package DBD::Mem::Statement;
|
||||
############################
|
||||
|
||||
@DBD::Mem::Statement::ISA = qw(DBI::DBD::SqlEngine::Statement);
|
||||
|
||||
|
||||
sub open_table ($$$$$)
|
||||
{
|
||||
my ( $self, $data, $table, $createMode, $lockMode ) = @_;
|
||||
|
||||
my $class = ref $self;
|
||||
$class =~ s/::Statement/::Table/;
|
||||
|
||||
my $flags = {
|
||||
createMode => $createMode,
|
||||
lockMode => $lockMode,
|
||||
};
|
||||
if( defined( $data->{Database}->{mem_table_data}->{$table} ) && $data->{Database}->{mem_table_data}->{$table})
|
||||
{
|
||||
my $t = $data->{Database}->{mem_tables}->{$table};
|
||||
$t->seek( $data, 0, 0 );
|
||||
return $t;
|
||||
}
|
||||
|
||||
return $self->SUPER::open_table($data, $table, $createMode, $lockMode);
|
||||
}
|
||||
|
||||
# ====== DataSource ============================================================
|
||||
|
||||
package DBD::Mem::DataSource;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use Carp;
|
||||
|
||||
@DBD::Mem::DataSource::ISA = "DBI::DBD::SqlEngine::DataSource";
|
||||
|
||||
sub complete_table_name ($$;$)
|
||||
{
|
||||
my ( $self, $meta, $table, $respect_case ) = @_;
|
||||
$table;
|
||||
}
|
||||
|
||||
sub open_data ($)
|
||||
{
|
||||
my ( $self, $meta, $attrs, $flags ) = @_;
|
||||
defined $meta->{data_tbl} or $meta->{data_tbl} = [];
|
||||
}
|
||||
|
||||
########################
|
||||
package DBD::Mem::Table;
|
||||
########################
|
||||
|
||||
# shamelessly stolen from SQL::Statement::RAM
|
||||
|
||||
use Carp qw/croak/;
|
||||
|
||||
@DBD::Mem::Table::ISA = qw(DBI::DBD::SqlEngine::Table);
|
||||
|
||||
use Carp qw(croak);
|
||||
|
||||
sub new
|
||||
{
|
||||
#my ( $class, $tname, $col_names, $data_tbl ) = @_;
|
||||
my ( $class, $data, $attrs, $flags ) = @_;
|
||||
my $self = $class->SUPER::new($data, $attrs, $flags);
|
||||
|
||||
my $meta = $self->{meta};
|
||||
$self->{records} = $meta->{data_tbl};
|
||||
$self->{index} = 0;
|
||||
|
||||
$self;
|
||||
}
|
||||
|
||||
sub bootstrap_table_meta
|
||||
{
|
||||
my ( $self, $dbh, $meta, $table ) = @_;
|
||||
|
||||
defined $meta->{sql_data_source} or $meta->{sql_data_source} = "DBD::Mem::DataSource";
|
||||
|
||||
$meta;
|
||||
}
|
||||
|
||||
sub fetch_row
|
||||
{
|
||||
my ( $self, $data ) = @_;
|
||||
|
||||
return $self->{row} =
|
||||
( $self->{records} and ( $self->{index} < scalar( @{ $self->{records} } ) ) )
|
||||
? [ @{ $self->{records}->[ $self->{index}++ ] } ]
|
||||
: undef;
|
||||
}
|
||||
|
||||
sub push_row
|
||||
{
|
||||
my ( $self, $data, $fields ) = @_;
|
||||
my $currentRow = $self->{index};
|
||||
$self->{index} = $currentRow + 1;
|
||||
$self->{records}->[$currentRow] = $fields;
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub truncate
|
||||
{
|
||||
my $self = shift;
|
||||
return splice @{ $self->{records} }, $self->{index}, 1;
|
||||
}
|
||||
|
||||
sub push_names
|
||||
{
|
||||
my ( $self, $data, $names ) = @_;
|
||||
my $meta = $self->{meta};
|
||||
$meta->{col_names} = $self->{col_names} = $names;
|
||||
$self->{org_col_names} = [ @{$names} ];
|
||||
$self->{col_nums} = {};
|
||||
$self->{col_nums}{ $names->[$_] } = $_ for ( 0 .. scalar @$names - 1 );
|
||||
}
|
||||
|
||||
sub drop ($)
|
||||
{
|
||||
my ($self, $data) = @_;
|
||||
delete $data->{Database}{sql_meta}{$self->{table}};
|
||||
return 1;
|
||||
} # drop
|
||||
|
||||
sub seek
|
||||
{
|
||||
my ( $self, $data, $pos, $whence ) = @_;
|
||||
return unless defined $self->{records};
|
||||
|
||||
my ($currentRow) = $self->{index};
|
||||
if ( $whence == 0 )
|
||||
{
|
||||
$currentRow = $pos;
|
||||
}
|
||||
elsif ( $whence == 1 )
|
||||
{
|
||||
$currentRow += $pos;
|
||||
}
|
||||
elsif ( $whence == 2 )
|
||||
{
|
||||
$currentRow = @{ $self->{records} } + $pos;
|
||||
}
|
||||
else
|
||||
{
|
||||
croak $self . "->seek: Illegal whence argument ($whence)";
|
||||
}
|
||||
|
||||
$currentRow < 0 and
|
||||
croak "Illegal row number: $currentRow";
|
||||
$self->{index} = $currentRow;
|
||||
}
|
||||
|
||||
1;
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBD::Mem - a DBI driver for in-memory tables
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
use DBI;
|
||||
$dbh = DBI->connect('dbi:Mem:', undef, undef, {});
|
||||
$dbh = DBI->connect('dbi:Mem:', undef, undef, {RaiseError => 1});
|
||||
|
||||
# or
|
||||
$dbh = DBI->connect('dbi:Mem:');
|
||||
$dbh = DBI->connect('DBI:Mem(RaiseError=1):');
|
||||
|
||||
and other variations on connect() as shown in the L<DBI> docs and
|
||||
<DBI::DBD::SqlEngine metadata|DBI::DBD::SqlEngine/Metadata>.
|
||||
|
||||
Use standard DBI prepare, execute, fetch, placeholders, etc.,
|
||||
see L<QUICK START> for an example.
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
DBD::Mem is a database management system that works right out of the box.
|
||||
If you have a standard installation of Perl and DBI you can begin creating,
|
||||
accessing, and modifying simple database tables without any further modules.
|
||||
You can add other modules (e.g., SQL::Statement) for improved functionality.
|
||||
|
||||
DBD::Mem doesn't store any data persistently - all data has the lifetime of
|
||||
the instantiated C<$dbh>. The main reason to use DBD::Mem is to use extended
|
||||
features of L<SQL::Statement> where temporary tables are required. One can
|
||||
use DBD::Mem to simulate C<VIEWS> or sub-queries.
|
||||
|
||||
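
A minimal usage sketch (the table and column names are illustrative; the
SQL accepted depends on whether L<SQL::Statement> or only L<DBI::SQL::Nano>
is available):

    use DBI;
    my $dbh = DBI->connect('dbi:Mem:', undef, undef, { RaiseError => 1 });

    $dbh->do('CREATE TABLE tmp_results (id INTEGER, name VARCHAR(64))');

    my $sth = $dbh->prepare('INSERT INTO tmp_results (id, name) VALUES (?, ?)');
    $sth->execute(1, 'alpha');
    $sth->execute(2, 'beta');

    my $rows = $dbh->selectall_arrayref('SELECT id, name FROM tmp_results');
    print "@$_\n" for @$rows;   # the data vanishes when $dbh goes away
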
Bundling C<DBD::Mem> with L<DBI> will allow us further compatibility checks
|
||||
of L<DBI::DBD::SqlEngine> beyond the capabilities of L<DBD::File> and
|
||||
L<DBD::DBM>. This will ensure DBI provided basis for drivers like
|
||||
L<DBD::AnyData2> or L<DBD::Amazon> are better prepared and tested for
|
||||
not-file based backends.
|
||||
|
||||
=head2 Metadata
|
||||
|
||||
There are no new metadata attributes introduced by C<DBD::Mem>. See
|
||||
L<DBI::DBD::SqlEngine/Metadata> for full description.
|
||||
|
||||
=head1 GETTING HELP, MAKING SUGGESTIONS, AND REPORTING BUGS
|
||||
|
||||
If you need help installing or using DBD::Mem, please write to the DBI
|
||||
users mailing list at L<mailto:dbi-users@perl.org> or to the
|
||||
comp.lang.perl.modules newsgroup on usenet. I cannot always answer
|
||||
every question quickly but there are many on the mailing list or in
|
||||
the newsgroup who can.
|
||||
|
||||
Developers of DBDs which rely on DBI::DBD::SqlEngine or DBD::Mem, or use
one of them as an example, are invited to join the DBI developers mailing
list at L<mailto:dbi-dev@perl.org> and are strongly encouraged to join our
IRC channel at L<irc://irc.perl.org/dbi>.
|
||||
|
||||
If you have suggestions, ideas for improvements, or bugs to report, please
|
||||
report a bug as described in DBI. Do not mail any of the authors directly,
|
||||
you might not get an answer.
|
||||
|
||||
When reporting bugs, please send the output of C<< $dbh->mem_versions($table) >>
|
||||
for a table that exhibits the bug and as small a sample as you can make of
|
||||
the code that produces the bug. And of course, patches are welcome, too
|
||||
:-).
|
||||
|
||||
If you need enhancements quickly, you can get commercial support as
|
||||
described at L<http://dbi.perl.org/support/> or you can contact Jens Rehsack
|
||||
at rehsack@cpan.org for commercial support.
|
||||
|
||||
=head1 AUTHOR AND COPYRIGHT
|
||||
|
||||
This module is written by Jens Rehsack < rehsack AT cpan.org >.
|
||||
|
||||
Copyright (c) 2016- by Jens Rehsack, all rights reserved.
|
||||
|
||||
You may freely distribute and/or modify this module under the terms of
|
||||
either the GNU General Public License (GPL) or the Artistic License, as
|
||||
specified in the Perl README file.
|
||||
|
||||
=head1 SEE ALSO
|
||||
|
||||
L<DBI> for the Database interface of the Perl Programming Language.
|
||||
|
||||
L<SQL::Statement> and L<DBI::SQL::Nano> for the available SQL engines.
|
||||
|
||||
L<SQL::Statement::RAM> where the implementation is shamelessly stolen from
|
||||
to allow the DBI-bundled pure-Perl drivers to increase the test coverage.
|
||||
|
||||
L<DBD::SQLite> using C<dbname=:memory:> for an incredibly fast in-memory database engine.
|
||||
|
||||
=cut
|
|
@ -0,0 +1,206 @@
|
|||
use strict;
|
||||
{
|
||||
package DBD::NullP;
|
||||
|
||||
require DBI;
|
||||
require Carp;
|
||||
|
||||
our @EXPORT = qw(); # Do NOT @EXPORT anything.
|
||||
our $VERSION = "12.014715";
|
||||
|
||||
# $Id: NullP.pm 14714 2011-02-22 17:27:07Z Tim $
|
||||
#
|
||||
# Copyright (c) 1994-2007 Tim Bunce
|
||||
#
|
||||
# You may distribute under the terms of either the GNU General Public
|
||||
# License or the Artistic License, as specified in the Perl README file.
|
||||
|
||||
our $drh = undef; # holds driver handle once initialised
|
||||
|
||||
sub driver{
|
||||
return $drh if $drh;
|
||||
my($class, $attr) = @_;
|
||||
$class .= "::dr";
|
||||
($drh) = DBI::_new_drh($class, {
|
||||
'Name' => 'NullP',
|
||||
'Version' => $VERSION,
|
||||
'Attribution' => 'DBD Example Null Perl stub by Tim Bunce',
|
||||
}, [ qw'example implementors private data']);
|
||||
$drh;
|
||||
}
|
||||
|
||||
sub CLONE {
|
||||
undef $drh;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
{ package DBD::NullP::dr; # ====== DRIVER ======
|
||||
our $imp_data_size = 0;
|
||||
use strict;
|
||||
|
||||
sub connect { # normally overridden, but a handy default
|
||||
my $dbh = shift->SUPER::connect(@_)
|
||||
or return;
|
||||
$dbh->STORE(Active => 1);
|
||||
$dbh;
|
||||
}
|
||||
|
||||
|
||||
sub DESTROY { undef }
|
||||
}
|
||||
|
||||
|
||||
{ package DBD::NullP::db; # ====== DATABASE ======
|
||||
our $imp_data_size = 0;
|
||||
use strict;
|
||||
use Carp qw(croak);
|
||||
|
||||
# Added get_info to support tests in 10examp.t
|
||||
sub get_info {
|
||||
my ($dbh, $type) = @_;
|
||||
|
||||
if ($type == 29) { # identifier quote
|
||||
return '"';
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
# Added table_info to support tests in 10examp.t
|
||||
sub table_info {
|
||||
my ($dbh, $catalog, $schema, $table, $type) = @_;
|
||||
|
||||
my ($outer, $sth) = DBI::_new_sth($dbh, {
|
||||
'Statement' => 'tables',
|
||||
});
|
||||
if (defined($type) && $type eq '%' && # special case for tables('','','','%')
|
||||
grep {defined($_) && $_ eq ''} ($catalog, $schema, $table)) {
|
||||
$outer->{dbd_nullp_data} = [[undef, undef, undef, 'TABLE', undef],
|
||||
[undef, undef, undef, 'VIEW', undef],
|
||||
[undef, undef, undef, 'ALIAS', undef]];
|
||||
} elsif (defined($catalog) && $catalog eq '%' && # special case for tables('%','','')
|
||||
grep {defined($_) && $_ eq ''} ($schema, $table)) {
|
||||
$outer->{dbd_nullp_data} = [['catalog1', undef, undef, undef, undef],
|
||||
['catalog2', undef, undef, undef, undef]];
|
||||
} else {
|
||||
$outer->{dbd_nullp_data} = [['catalog', 'schema', 'table1', 'TABLE']];
|
||||
$outer->{dbd_nullp_data} = [['catalog', 'schema', 'table2', 'TABLE']];
|
||||
$outer->{dbd_nullp_data} = [['catalog', 'schema', 'table3', 'TABLE']];
|
||||
}
|
||||
$outer->STORE(NUM_OF_FIELDS => 5);
|
||||
$sth->STORE(Active => 1);
|
||||
return $outer;
|
||||
}
|
||||
|
||||
sub prepare {
|
||||
my ($dbh, $statement)= @_;
|
||||
|
||||
my ($outer, $sth) = DBI::_new_sth($dbh, {
|
||||
'Statement' => $statement,
|
||||
});
|
||||
|
||||
return $outer;
|
||||
}
|
||||
|
||||
sub FETCH {
|
||||
my ($dbh, $attrib) = @_;
|
||||
# In reality this would interrogate the database engine to
|
||||
# either return dynamic values that cannot be precomputed
|
||||
# or fetch and cache attribute values too expensive to prefetch.
|
||||
return $dbh->SUPER::FETCH($attrib);
|
||||
}
|
||||
|
||||
sub STORE {
|
||||
my ($dbh, $attrib, $value) = @_;
|
||||
# would normally validate and only store known attributes
|
||||
# else pass up to DBI to handle
|
||||
if ($attrib eq 'AutoCommit') {
|
||||
Carp::croak("Can't disable AutoCommit") unless $value;
|
||||
# convert AutoCommit values to magic ones to let DBI
|
||||
# know that the driver has 'handled' the AutoCommit attribute
|
||||
$value = ($value) ? -901 : -900;
|
||||
} elsif ($attrib eq 'nullp_set_err') {
|
||||
# a fake attribute to produce a test case where STORE issues a warning
|
||||
$dbh->set_err($value, $value);
|
||||
}
|
||||
return $dbh->SUPER::STORE($attrib, $value);
|
||||
}
|
||||
|
||||
sub ping { 1 }
|
||||
|
||||
sub disconnect {
|
||||
shift->STORE(Active => 0);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
{ package DBD::NullP::st; # ====== STATEMENT ======
|
||||
our $imp_data_size = 0;
|
||||
use strict;
|
||||
|
||||
sub bind_param {
|
||||
my ($sth, $param, $value, $attr) = @_;
|
||||
$sth->{ParamValues}{$param} = $value;
|
||||
$sth->{ParamAttr}{$param} = $attr
|
||||
if defined $attr; # attr is sticky if not explicitly set
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub execute {
|
||||
my $sth = shift;
|
||||
$sth->bind_param($_, $_[$_-1]) for (1..@_);
|
||||
if ($sth->{Statement} =~ m/^ \s* SELECT \s+/xmsi) {
|
||||
$sth->STORE(NUM_OF_FIELDS => 1);
|
||||
$sth->{NAME} = [ "fieldname" ];
|
||||
# just for the sake of returning something, we return the params
|
||||
my $params = $sth->{ParamValues} || {};
|
||||
$sth->{dbd_nullp_data} = [ @{$params}{ sort keys %$params } ];
|
||||
$sth->STORE(Active => 1);
|
||||
}
|
||||
# force a sleep - handy for testing
|
||||
elsif ($sth->{Statement} =~ m/^ \s* SLEEP \s+ (\S+) /xmsi) {
|
||||
my $secs = $1;
|
||||
if (eval { require Time::HiRes; defined &Time::HiRes::sleep }) {
|
||||
Time::HiRes::sleep($secs);
|
||||
}
|
||||
else {
|
||||
sleep $secs;
|
||||
}
|
||||
}
|
||||
# force an error - handy for testing
|
||||
elsif ($sth->{Statement} =~ m/^ \s* ERROR \s+ (\d+) \s* (.*) /xmsi) {
|
||||
return $sth->set_err($1, $2);
|
||||
}
|
||||
# anything else is silently ignored, successfully
|
||||
1;
|
||||
}
|
||||
|
||||
sub fetchrow_arrayref {
|
||||
my $sth = shift;
|
||||
my $data = shift @{$sth->{dbd_nullp_data}};
|
||||
if (!$data || !@$data) {
|
||||
$sth->finish; # no more data so finish
|
||||
return undef;
|
||||
}
|
||||
return $sth->_set_fbav($data);
|
||||
}
|
||||
*fetch = \&fetchrow_arrayref; # alias
|
||||
|
||||
sub FETCH {
|
||||
my ($sth, $attrib) = @_;
|
||||
# would normally validate and only fetch known attributes
|
||||
# else pass up to DBI to handle
|
||||
return $sth->SUPER::FETCH($attrib);
|
||||
}
|
||||
|
||||
sub STORE {
|
||||
my ($sth, $attrib, $value) = @_;
|
||||
# would normally validate and only store known attributes
|
||||
# else pass up to DBI to handle
|
||||
return $sth->SUPER::STORE($attrib, $value);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
1;
|
File diff suppressed because it is too large
|
@ -0,0 +1,305 @@
|
|||
use strict;
|
||||
{
|
||||
package DBD::Sponge;
|
||||
|
||||
require DBI;
|
||||
require Carp;
|
||||
|
||||
our @EXPORT = qw(); # Do NOT @EXPORT anything.
|
||||
our $VERSION = "12.010003";
|
||||
|
||||
# $Id: Sponge.pm 10002 2007-09-26 21:03:25Z Tim $
|
||||
#
|
||||
# Copyright (c) 1994-2003 Tim Bunce Ireland
|
||||
#
|
||||
# You may distribute under the terms of either the GNU General Public
|
||||
# License or the Artistic License, as specified in the Perl README file.
|
||||
|
||||
our $drh = undef; # holds driver handle once initialised
|
||||
my $methods_already_installed;
|
||||
|
||||
sub driver{
|
||||
return $drh if $drh;
|
||||
|
||||
DBD::Sponge::db->install_method("sponge_test_installed_method")
|
||||
unless $methods_already_installed++;
|
||||
|
||||
my($class, $attr) = @_;
|
||||
$class .= "::dr";
|
||||
($drh) = DBI::_new_drh($class, {
|
||||
'Name' => 'Sponge',
|
||||
'Version' => $VERSION,
|
||||
'Attribution' => "DBD::Sponge $VERSION (fake cursor driver) by Tim Bunce",
|
||||
});
|
||||
$drh;
|
||||
}
|
||||
|
||||
sub CLONE {
|
||||
undef $drh;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
{ package DBD::Sponge::dr; # ====== DRIVER ======
|
||||
our $imp_data_size = 0;
|
||||
# we use default (dummy) connect method
|
||||
}
|
||||
|
||||
|
||||
{ package DBD::Sponge::db; # ====== DATABASE ======
|
||||
our $imp_data_size = 0;
|
||||
use strict;
|
||||
|
||||
sub prepare {
|
||||
my($dbh, $statement, $attribs) = @_;
|
||||
my $rows = delete $attribs->{'rows'}
|
||||
or return $dbh->set_err($DBI::stderr,"No rows attribute supplied to prepare");
|
||||
my ($outer, $sth) = DBI::_new_sth($dbh, {
|
||||
'Statement' => $statement,
|
||||
'rows' => $rows,
|
||||
(map { exists $attribs->{$_} ? ($_=>$attribs->{$_}) : () }
|
||||
qw(execute_hook)
|
||||
),
|
||||
});
|
||||
if (my $behave_like = $attribs->{behave_like}) {
|
||||
$outer->{$_} = $behave_like->{$_}
|
||||
foreach (qw(RaiseError PrintError HandleError ShowErrorStatement));
|
||||
}
|
||||
|
||||
if ($statement =~ /^\s*insert\b/) { # very basic, just for testing execute_array()
|
||||
$sth->{is_insert} = 1;
|
||||
my $NUM_OF_PARAMS = $attribs->{NUM_OF_PARAMS}
|
||||
or return $dbh->set_err($DBI::stderr,"NUM_OF_PARAMS not specified for INSERT statement");
|
||||
$sth->STORE('NUM_OF_PARAMS' => $attribs->{NUM_OF_PARAMS} );
|
||||
}
|
||||
else { #assume select
|
||||
|
||||
# we need to set NUM_OF_FIELDS
|
||||
my $numFields;
|
||||
if ($attribs->{'NUM_OF_FIELDS'}) {
|
||||
$numFields = $attribs->{'NUM_OF_FIELDS'};
|
||||
} elsif ($attribs->{'NAME'}) {
|
||||
$numFields = @{$attribs->{NAME}};
|
||||
} elsif ($attribs->{'TYPE'}) {
|
||||
$numFields = @{$attribs->{TYPE}};
|
||||
} elsif (my $firstrow = $rows->[0]) {
|
||||
$numFields = scalar @$firstrow;
|
||||
} else {
|
||||
return $dbh->set_err($DBI::stderr, 'Cannot determine NUM_OF_FIELDS');
|
||||
}
|
||||
$sth->STORE('NUM_OF_FIELDS' => $numFields);
|
||||
$sth->{NAME} = $attribs->{NAME}
|
||||
|| [ map { "col$_" } 1..$numFields ];
|
||||
$sth->{TYPE} = $attribs->{TYPE}
|
||||
|| [ (DBI::SQL_VARCHAR()) x $numFields ];
|
||||
$sth->{PRECISION} = $attribs->{PRECISION}
|
||||
|| [ map { length($sth->{NAME}->[$_]) } 0..$numFields -1 ];
|
||||
$sth->{SCALE} = $attribs->{SCALE}
|
||||
|| [ (0) x $numFields ];
|
||||
$sth->{NULLABLE} = $attribs->{NULLABLE}
|
||||
|| [ (2) x $numFields ];
|
||||
}
|
||||
|
||||
$outer;
|
||||
}
|
||||
|
||||
sub type_info_all {
|
||||
my ($dbh) = @_;
|
||||
my $ti = [
|
||||
{ TYPE_NAME => 0,
|
||||
DATA_TYPE => 1,
|
||||
PRECISION => 2,
|
||||
LITERAL_PREFIX => 3,
|
||||
LITERAL_SUFFIX => 4,
|
||||
CREATE_PARAMS => 5,
|
||||
NULLABLE => 6,
|
||||
CASE_SENSITIVE => 7,
|
||||
SEARCHABLE => 8,
|
||||
UNSIGNED_ATTRIBUTE=> 9,
|
||||
MONEY => 10,
|
||||
AUTO_INCREMENT => 11,
|
||||
LOCAL_TYPE_NAME => 12,
|
||||
MINIMUM_SCALE => 13,
|
||||
MAXIMUM_SCALE => 14,
|
||||
},
|
||||
[ 'VARCHAR', DBI::SQL_VARCHAR(), undef, "'","'", undef, 0, 1, 1, 0, 0,0,undef,0,0 ],
|
||||
];
|
||||
return $ti;
|
||||
}
|
||||
|
||||
sub FETCH {
|
||||
my ($dbh, $attrib) = @_;
|
||||
# In reality this would interrogate the database engine to
|
||||
# either return dynamic values that cannot be precomputed
|
||||
# or fetch and cache attribute values too expensive to prefetch.
|
||||
return 1 if $attrib eq 'AutoCommit';
|
||||
# else pass up to DBI to handle
|
||||
return $dbh->SUPER::FETCH($attrib);
|
||||
}
|
||||
|
||||
sub STORE {
|
||||
my ($dbh, $attrib, $value) = @_;
|
||||
# would normally validate and only store known attributes
|
||||
# else pass up to DBI to handle
|
||||
if ($attrib eq 'AutoCommit') {
|
||||
return 1 if $value; # is already set
|
||||
Carp::croak("Can't disable AutoCommit");
|
||||
}
|
||||
return $dbh->SUPER::STORE($attrib, $value);
|
||||
}
|
||||
|
||||
sub sponge_test_installed_method {
|
||||
my ($dbh, @args) = @_;
|
||||
return $dbh->set_err(42, "not enough parameters") unless @args >= 2;
|
||||
return \@args;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
{ package DBD::Sponge::st; # ====== STATEMENT ======
|
||||
our $imp_data_size = 0;
|
||||
use strict;
|
||||
|
||||
sub execute {
|
||||
my $sth = shift;
|
||||
|
||||
# hack to support ParamValues (when not using bind_param)
|
||||
$sth->{ParamValues} = (@_) ? { map { $_ => $_[$_-1] } 1..@_ } : undef;
|
||||
|
||||
if (my $hook = $sth->{execute_hook}) {
|
||||
&$hook($sth, @_) or return;
|
||||
}
|
||||
|
||||
if ($sth->{is_insert}) {
|
||||
my $row;
|
||||
$row = (@_) ? [ @_ ] : die "bind_param not supported yet" ;
|
||||
my $NUM_OF_PARAMS = $sth->{NUM_OF_PARAMS};
|
||||
return $sth->set_err($DBI::stderr, @$row." values bound (@$row) but $NUM_OF_PARAMS expected")
|
||||
if @$row != $NUM_OF_PARAMS;
|
||||
{ local $^W; $sth->trace_msg("inserting (@$row)\n"); }
|
||||
push @{ $sth->{rows} }, $row;
|
||||
}
|
||||
else { # mark select sth as Active
|
||||
$sth->STORE(Active => 1);
|
||||
}
|
||||
# else do nothing for select as data is already in $sth->{rows}
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub fetch {
|
||||
my ($sth) = @_;
|
||||
my $row = shift @{$sth->{'rows'}};
|
||||
unless ($row) {
|
||||
$sth->STORE(Active => 0);
|
||||
return undef;
|
||||
}
|
||||
return $sth->_set_fbav($row);
|
||||
}
|
||||
*fetchrow_arrayref = \&fetch;
|
||||
|
||||
sub FETCH {
|
||||
my ($sth, $attrib) = @_;
|
||||
# would normally validate and only fetch known attributes
|
||||
# else pass up to DBI to handle
|
||||
return $sth->SUPER::FETCH($attrib);
|
||||
}
|
||||
|
||||
sub STORE {
|
||||
my ($sth, $attrib, $value) = @_;
|
||||
# would normally validate and only store known attributes
|
||||
# else pass up to DBI to handle
|
||||
return $sth->SUPER::STORE($attrib, $value);
|
||||
}
|
||||
}
|
||||
|
||||
1;
|
||||
|
||||
__END__
|
||||
|
||||
=pod
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBD::Sponge - Create a DBI statement handle from Perl data
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
my $sponge = DBI->connect("dbi:Sponge:","","",{ RaiseError => 1 });
|
||||
my $sth = $sponge->prepare($statement, {
|
||||
rows => $data,
|
||||
NAME => $names,
|
||||
%attr
|
||||
}
|
||||
);
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
DBD::Sponge is useful for making a Perl data structure accessible through a
|
||||
standard DBI statement handle. This may be useful to DBD module authors who
|
||||
need to transform data in this way.
|
||||
|
||||
=head1 METHODS
|
||||
|
||||
=head2 connect()
|
||||
|
||||
my $sponge = DBI->connect("dbi:Sponge:","","",{ RaiseError => 1 });
|
||||
|
||||
Here's a sample syntax for creating a database handle for the Sponge driver.
|
||||
No username and password are needed.
|
||||
|
||||
=head2 prepare()
|
||||
|
||||
my $sth = $sponge->prepare($statement, {
|
||||
rows => $data,
|
||||
NAME => $names,
|
||||
%attr
|
||||
}
|
||||
);
|
||||
|
||||
=over 4
|
||||
|
||||
=item *
|
||||
|
||||
The C<$statement> here is an arbitrary statement or name you want
|
||||
to provide as identity of your data. If you're using DBI::Profile
|
||||
it will appear in the profile data.
|
||||
|
||||
Generally it's expected that you are preparing a statement handle
|
||||
as if a C<select> statement happened.
|
||||
|
||||
=item *
|
||||
|
||||
C<$data> is a reference to the data you are providing, given as an array of arrays.
|
||||
|
||||
=item *
|
||||
|
||||
C<$names> is a reference to an array of column names for the C<$data> you are providing.
|
||||
The number and order should match the number and ordering of the C<$data> columns.
|
||||
|
||||
=item *
|
||||
|
||||
C<%attr> is a hash of other standard DBI attributes that you might pass to a prepare statement.
|
||||
|
||||
Currently only NAME, TYPE, and PRECISION are supported.
|
||||
|
||||
=back
|
||||
|
||||
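
Putting the pieces together, a minimal sketch (the statement name and the
data are illustrative only):

    use DBI;
    my $sponge = DBI->connect("dbi:Sponge:", "", "", { RaiseError => 1 });

    my $data  = [ [ 1, 'alpha' ], [ 2, 'beta' ] ];
    my $names = [ qw(id name) ];

    my $sth = $sponge->prepare("select id, name from memory", {
        rows => $data,
        NAME => $names,
    });
    $sth->execute();
    while (my $row = $sth->fetchrow_arrayref) {
        print "@$row\n";    # prints "1 alpha" then "2 beta"
    }
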
=head1 BUGS
|
||||
|
||||
Using this module to prepare INSERT-like statements is not currently documented.
|
||||
|
||||
=head1 AUTHOR AND COPYRIGHT
|
||||
|
||||
This module is Copyright (c) 2003 Tim Bunce
|
||||
|
||||
Documentation initially written by Mark Stosberg
|
||||
|
||||
The DBD::Sponge module is free software; you can redistribute it and/or
|
||||
modify it under the same terms as Perl itself. In particular permission
|
||||
is granted to Tim Bunce for distributing this as a part of the DBI.
|
||||
|
||||
=head1 SEE ALSO
|
||||
|
||||
L<DBI>
|
||||
|
||||
=cut
|
|
@ -0,0 +1,238 @@
|
|||
# $Id: ANSI.pm 8696 2007-01-24 23:12:38Z Tim $
|
||||
#
|
||||
# Copyright (c) 2002 Tim Bunce Ireland
|
||||
#
|
||||
# Constant data describing ANSI CLI info types and return values for the
|
||||
# SQLGetInfo() method of ODBC.
|
||||
#
|
||||
# You may distribute under the terms of either the GNU General Public
|
||||
# License or the Artistic License, as specified in the Perl README file.
|
||||
use strict;
|
||||
|
||||
package DBI::Const::GetInfo::ANSI;
|
||||
|
||||
our (%InfoTypes,%ReturnTypes,%ReturnValues,);
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBI::Const::GetInfo::ANSI - ISO/IEC SQL/CLI Constants for GetInfo
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
The API for this module is private and subject to change.
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
Information requested by GetInfo().
|
||||
|
||||
See: A.1 C header file SQLCLI.H, Page 316, 317.
|
||||
|
||||
The API for this module is private and subject to change.
|
||||
|
||||
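
For illustration only (the interface is private, as noted above), the
numeric code for an info type can be looked up in the C<%InfoTypes> hash
defined below and passed to the DBI C<get_info()> method:

    use DBI::Const::GetInfo::ANSI;

    my $code = $DBI::Const::GetInfo::ANSI::InfoTypes{SQL_DBMS_NAME};  # 17
    # my $dbms_name = $dbh->get_info($code);
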
=head1 REFERENCES
|
||||
|
||||
ISO/IEC FCD 9075-3:200x Information technology - Database Languages -
|
||||
SQL - Part 3: Call-Level Interface (SQL/CLI)
|
||||
|
||||
SC32 N00744 = WG3:VIE-005 = H2-2002-007
|
||||
|
||||
Date: 2002-01-15
|
||||
|
||||
=cut
|
||||
|
||||
my
|
||||
$VERSION = "2.008697";
|
||||
|
||||
%InfoTypes =
|
||||
(
|
||||
SQL_ALTER_TABLE => 86
|
||||
, SQL_CATALOG_NAME => 10003
|
||||
, SQL_COLLATING_SEQUENCE => 10004
|
||||
, SQL_CURSOR_COMMIT_BEHAVIOR => 23
|
||||
, SQL_CURSOR_SENSITIVITY => 10001
|
||||
, SQL_DATA_SOURCE_NAME => 2
|
||||
, SQL_DATA_SOURCE_READ_ONLY => 25
|
||||
, SQL_DBMS_NAME => 17
|
||||
, SQL_DBMS_VERSION => 18
|
||||
, SQL_DEFAULT_TRANSACTION_ISOLATION => 26
|
||||
, SQL_DESCRIBE_PARAMETER => 10002
|
||||
, SQL_FETCH_DIRECTION => 8
|
||||
, SQL_GETDATA_EXTENSIONS => 81
|
||||
, SQL_IDENTIFIER_CASE => 28
|
||||
, SQL_INTEGRITY => 73
|
||||
, SQL_MAXIMUM_CATALOG_NAME_LENGTH => 34
|
||||
, SQL_MAXIMUM_COLUMNS_IN_GROUP_BY => 97
|
||||
, SQL_MAXIMUM_COLUMNS_IN_ORDER_BY => 99
|
||||
, SQL_MAXIMUM_COLUMNS_IN_SELECT => 100
|
||||
, SQL_MAXIMUM_COLUMNS_IN_TABLE => 101
|
||||
, SQL_MAXIMUM_COLUMN_NAME_LENGTH => 30
|
||||
, SQL_MAXIMUM_CONCURRENT_ACTIVITIES => 1
|
||||
, SQL_MAXIMUM_CURSOR_NAME_LENGTH => 31
|
||||
, SQL_MAXIMUM_DRIVER_CONNECTIONS => 0
|
||||
, SQL_MAXIMUM_IDENTIFIER_LENGTH => 10005
|
||||
, SQL_MAXIMUM_SCHEMA_NAME_LENGTH => 32
|
||||
, SQL_MAXIMUM_STMT_OCTETS => 20000
|
||||
, SQL_MAXIMUM_STMT_OCTETS_DATA => 20001
|
||||
, SQL_MAXIMUM_STMT_OCTETS_SCHEMA => 20002
|
||||
, SQL_MAXIMUM_TABLES_IN_SELECT => 106
|
||||
, SQL_MAXIMUM_TABLE_NAME_LENGTH => 35
|
||||
, SQL_MAXIMUM_USER_NAME_LENGTH => 107
|
||||
, SQL_NULL_COLLATION => 85
|
||||
, SQL_ORDER_BY_COLUMNS_IN_SELECT => 90
|
||||
, SQL_OUTER_JOIN_CAPABILITIES => 115
|
||||
, SQL_SCROLL_CONCURRENCY => 43
|
||||
, SQL_SEARCH_PATTERN_ESCAPE => 14
|
||||
, SQL_SERVER_NAME => 13
|
||||
, SQL_SPECIAL_CHARACTERS => 94
|
||||
, SQL_TRANSACTION_CAPABLE => 46
|
||||
, SQL_TRANSACTION_ISOLATION_OPTION => 72
|
||||
, SQL_USER_NAME => 47
|
||||
);
|
||||
|
||||
=head2 %ReturnTypes
|
||||
|
||||
See: Codes and data types for implementation information (Table 28), Page 85, 86.
|
||||
|
||||
Mapped to ODBC datatype names.
|
||||
|
||||
=cut
|
||||
|
||||
%ReturnTypes = # maxlen
|
||||
(
|
||||
SQL_ALTER_TABLE => 'SQLUINTEGER bitmask' # INTEGER
|
||||
, SQL_CATALOG_NAME => 'SQLCHAR' # CHARACTER (1)
|
||||
, SQL_COLLATING_SEQUENCE => 'SQLCHAR' # CHARACTER (254)
|
||||
, SQL_CURSOR_COMMIT_BEHAVIOR => 'SQLUSMALLINT' # SMALLINT
|
||||
, SQL_CURSOR_SENSITIVITY => 'SQLUINTEGER' # INTEGER
|
||||
, SQL_DATA_SOURCE_NAME => 'SQLCHAR' # CHARACTER (128)
|
||||
, SQL_DATA_SOURCE_READ_ONLY => 'SQLCHAR' # CHARACTER (1)
|
||||
, SQL_DBMS_NAME => 'SQLCHAR' # CHARACTER (254)
|
||||
, SQL_DBMS_VERSION => 'SQLCHAR' # CHARACTER (254)
|
||||
, SQL_DEFAULT_TRANSACTION_ISOLATION => 'SQLUINTEGER' # INTEGER
|
||||
, SQL_DESCRIBE_PARAMETER => 'SQLCHAR' # CHARACTER (1)
|
||||
, SQL_FETCH_DIRECTION => 'SQLUINTEGER bitmask' # INTEGER
|
||||
, SQL_GETDATA_EXTENSIONS => 'SQLUINTEGER bitmask' # INTEGER
|
||||
, SQL_IDENTIFIER_CASE => 'SQLUSMALLINT' # SMALLINT
|
||||
, SQL_INTEGRITY => 'SQLCHAR' # CHARACTER (1)
|
||||
, SQL_MAXIMUM_CATALOG_NAME_LENGTH => 'SQLUSMALLINT' # SMALLINT
|
||||
, SQL_MAXIMUM_COLUMNS_IN_GROUP_BY => 'SQLUSMALLINT' # SMALLINT
|
||||
, SQL_MAXIMUM_COLUMNS_IN_ORDER_BY => 'SQLUSMALLINT' # SMALLINT
|
||||
, SQL_MAXIMUM_COLUMNS_IN_SELECT => 'SQLUSMALLINT' # SMALLINT
|
||||
, SQL_MAXIMUM_COLUMNS_IN_TABLE => 'SQLUSMALLINT' # SMALLINT
|
||||
, SQL_MAXIMUM_COLUMN_NAME_LENGTH => 'SQLUSMALLINT' # SMALLINT
|
||||
, SQL_MAXIMUM_CONCURRENT_ACTIVITIES => 'SQLUSMALLINT' # SMALLINT
|
||||
, SQL_MAXIMUM_CURSOR_NAME_LENGTH => 'SQLUSMALLINT' # SMALLINT
|
||||
, SQL_MAXIMUM_DRIVER_CONNECTIONS => 'SQLUSMALLINT' # SMALLINT
|
||||
, SQL_MAXIMUM_IDENTIFIER_LENGTH => 'SQLUSMALLINT' # SMALLINT
|
||||
, SQL_MAXIMUM_SCHEMA_NAME_LENGTH => 'SQLUSMALLINT' # SMALLINT
|
||||
, SQL_MAXIMUM_STMT_OCTETS => 'SQLUSMALLINT' # SMALLINT
|
||||
, SQL_MAXIMUM_STMT_OCTETS_DATA => 'SQLUSMALLINT' # SMALLINT
|
||||
, SQL_MAXIMUM_STMT_OCTETS_SCHEMA => 'SQLUSMALLINT' # SMALLINT
|
||||
, SQL_MAXIMUM_TABLES_IN_SELECT => 'SQLUSMALLINT' # SMALLINT
|
||||
, SQL_MAXIMUM_TABLE_NAME_LENGTH => 'SQLUSMALLINT' # SMALLINT
|
||||
, SQL_MAXIMUM_USER_NAME_LENGTH => 'SQLUSMALLINT' # SMALLINT
|
||||
, SQL_NULL_COLLATION => 'SQLUSMALLINT' # SMALLINT
|
||||
, SQL_ORDER_BY_COLUMNS_IN_SELECT => 'SQLCHAR' # CHARACTER (1)
|
||||
, SQL_OUTER_JOIN_CAPABILITIES => 'SQLUINTEGER bitmask' # INTEGER
|
||||
, SQL_SCROLL_CONCURRENCY => 'SQLUINTEGER bitmask' # INTEGER
|
||||
, SQL_SEARCH_PATTERN_ESCAPE => 'SQLCHAR' # CHARACTER (1)
|
||||
, SQL_SERVER_NAME => 'SQLCHAR' # CHARACTER (128)
|
||||
, SQL_SPECIAL_CHARACTERS => 'SQLCHAR' # CHARACTER (254)
|
||||
, SQL_TRANSACTION_CAPABLE => 'SQLUSMALLINT' # SMALLINT
|
||||
, SQL_TRANSACTION_ISOLATION_OPTION => 'SQLUINTEGER bitmask' # INTEGER
|
||||
, SQL_USER_NAME => 'SQLCHAR' # CHARACTER (128)
|
||||
);
|
||||
|
||||
=head2 %ReturnValues
|
||||
|
||||
See: A.1 C header file SQLCLI.H, Page 317, 318.
|
||||
|
||||
=cut
|
||||
|
||||
$ReturnValues{SQL_ALTER_TABLE} =
|
||||
{
|
||||
SQL_AT_ADD_COLUMN => 0x00000001
|
||||
, SQL_AT_DROP_COLUMN => 0x00000002
|
||||
, SQL_AT_ALTER_COLUMN => 0x00000004
|
||||
, SQL_AT_ADD_CONSTRAINT => 0x00000008
|
||||
, SQL_AT_DROP_CONSTRAINT => 0x00000010
|
||||
};
|
||||
$ReturnValues{SQL_CURSOR_COMMIT_BEHAVIOR} =
|
||||
{
|
||||
SQL_CB_DELETE => 0
|
||||
, SQL_CB_CLOSE => 1
|
||||
, SQL_CB_PRESERVE => 2
|
||||
};
|
||||
$ReturnValues{SQL_FETCH_DIRECTION} =
|
||||
{
|
||||
SQL_FD_FETCH_NEXT => 0x00000001
|
||||
, SQL_FD_FETCH_FIRST => 0x00000002
|
||||
, SQL_FD_FETCH_LAST => 0x00000004
|
||||
, SQL_FD_FETCH_PRIOR => 0x00000008
|
||||
, SQL_FD_FETCH_ABSOLUTE => 0x00000010
|
||||
, SQL_FD_FETCH_RELATIVE => 0x00000020
|
||||
};
|
||||
$ReturnValues{SQL_GETDATA_EXTENSIONS} =
|
||||
{
|
||||
SQL_GD_ANY_COLUMN => 0x00000001
|
||||
, SQL_GD_ANY_ORDER => 0x00000002
|
||||
};
|
||||
$ReturnValues{SQL_IDENTIFIER_CASE} =
|
||||
{
|
||||
SQL_IC_UPPER => 1
|
||||
, SQL_IC_LOWER => 2
|
||||
, SQL_IC_SENSITIVE => 3
|
||||
, SQL_IC_MIXED => 4
|
||||
};
|
||||
$ReturnValues{SQL_NULL_COLLATION} =
|
||||
{
|
||||
SQL_NC_HIGH => 1
|
||||
, SQL_NC_LOW => 2
|
||||
};
|
||||
$ReturnValues{SQL_OUTER_JOIN_CAPABILITIES} =
|
||||
{
|
||||
SQL_OUTER_JOIN_LEFT => 0x00000001
|
||||
, SQL_OUTER_JOIN_RIGHT => 0x00000002
|
||||
, SQL_OUTER_JOIN_FULL => 0x00000004
|
||||
, SQL_OUTER_JOIN_NESTED => 0x00000008
|
||||
, SQL_OUTER_JOIN_NOT_ORDERED => 0x00000010
|
||||
, SQL_OUTER_JOIN_INNER => 0x00000020
|
||||
, SQL_OUTER_JOIN_ALL_COMPARISON_OPS => 0x00000040
|
||||
};
|
||||
$ReturnValues{SQL_SCROLL_CONCURRENCY} =
|
||||
{
|
||||
SQL_SCCO_READ_ONLY => 0x00000001
|
||||
, SQL_SCCO_LOCK => 0x00000002
|
||||
, SQL_SCCO_OPT_ROWVER => 0x00000004
|
||||
, SQL_SCCO_OPT_VALUES => 0x00000008
|
||||
};
|
||||
$ReturnValues{SQL_TRANSACTION_ACCESS_MODE} =
|
||||
{
|
||||
SQL_TRANSACTION_READ_ONLY => 0x00000001
|
||||
, SQL_TRANSACTION_READ_WRITE => 0x00000002
|
||||
};
|
||||
$ReturnValues{SQL_TRANSACTION_CAPABLE} =
|
||||
{
|
||||
SQL_TC_NONE => 0
|
||||
, SQL_TC_DML => 1
|
||||
, SQL_TC_ALL => 2
|
||||
, SQL_TC_DDL_COMMIT => 3
|
||||
, SQL_TC_DDL_IGNORE => 4
|
||||
};
|
||||
$ReturnValues{SQL_TRANSACTION_ISOLATION} =
|
||||
{
|
||||
SQL_TRANSACTION_READ_UNCOMMITTED => 0x00000001
|
||||
, SQL_TRANSACTION_READ_COMMITTED => 0x00000002
|
||||
, SQL_TRANSACTION_REPEATABLE_READ => 0x00000004
|
||||
, SQL_TRANSACTION_SERIALIZABLE => 0x00000008
|
||||
};
|
||||
|
||||
1;
|
||||
|
||||
=head1 TODO
|
||||
|
||||
Corrections, e.g.:
|
||||
|
||||
SQL_TRANSACTION_ISOLATION_OPTION vs. SQL_TRANSACTION_ISOLATION
|
||||
|
||||
=cut
|
File diff suppressed because it is too large
|
@ -0,0 +1,104 @@
|
|||
# $Id: GetInfoReturn.pm 8696 2007-01-24 23:12:38Z Tim $
|
||||
#
|
||||
# Copyright (c) 2002 Tim Bunce Ireland
|
||||
#
|
||||
# Constant data describing return values from the DBI getinfo function.
|
||||
#
|
||||
# You may distribute under the terms of either the GNU General Public
|
||||
# License or the Artistic License, as specified in the Perl README file.
|
||||
|
||||
package DBI::Const::GetInfoReturn;
|
||||
|
||||
use strict;
|
||||
|
||||
use Exporter ();
|
||||
|
||||
use vars qw(@ISA @EXPORT @EXPORT_OK %GetInfoReturnTypes %GetInfoReturnValues);
|
||||
|
||||
@ISA = qw(Exporter);
|
||||
@EXPORT = qw(%GetInfoReturnTypes %GetInfoReturnValues);
|
||||
|
||||
my
|
||||
$VERSION = "2.008697";
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBI::Const::GetInfoReturn - Data and functions for describing GetInfo results
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
The interface to this module is undocumented and liable to change.
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
Data and functions for describing GetInfo results
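For example, the C<Format> and C<Explain> functions defined below can be
used to render a raw C<get_info> result (a minimal sketch; the database
handle C<$dbh> is assumed to exist and to support the requested info type):

  use DBI::Const::GetInfoType;
  use DBI::Const::GetInfoReturn;

  my $value = $dbh->get_info( $GetInfoType{SQL_CURSOR_COMMIT_BEHAVIOR} );

  # prints the raw value (e.g. 2)
  print DBI::Const::GetInfoReturn::Format ('SQL_CURSOR_COMMIT_BEHAVIOR', $value), "\n";

  # prints the symbolic name (e.g. SQL_CB_PRESERVE for the value 2)
  print DBI::Const::GetInfoReturn::Explain('SQL_CURSOR_COMMIT_BEHAVIOR', $value), "\n";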
|
||||
|
||||
=cut
|
||||
|
||||
use DBI::Const::GetInfoType;
|
||||
|
||||
use DBI::Const::GetInfo::ANSI ();
|
||||
use DBI::Const::GetInfo::ODBC ();
|
||||
|
||||
%GetInfoReturnTypes =
|
||||
(
|
||||
%DBI::Const::GetInfo::ANSI::ReturnTypes
|
||||
, %DBI::Const::GetInfo::ODBC::ReturnTypes
|
||||
);
|
||||
|
||||
%GetInfoReturnValues = ();
|
||||
{
|
||||
my $A = \%DBI::Const::GetInfo::ANSI::ReturnValues;
|
||||
my $O = \%DBI::Const::GetInfo::ODBC::ReturnValues;
|
||||
while ( my ($k, $v) = each %$A ) {
|
||||
my %h = ( exists $O->{$k} ) ? ( %$v, %{$O->{$k}} ) : %$v;
|
||||
$GetInfoReturnValues{$k} = \%h;
|
||||
}
|
||||
while ( my ($k, $v) = each %$O ) {
|
||||
next if exists $A->{$k};
|
||||
my %h = %$v;
|
||||
$GetInfoReturnValues{$k} = \%h;
|
||||
}
|
||||
}
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
sub Format {
|
||||
my $InfoType = shift;
|
||||
my $Value = shift;
|
||||
|
||||
return '' unless defined $Value;
|
||||
|
||||
my $ReturnType = $GetInfoReturnTypes{$InfoType};
|
||||
|
||||
return sprintf '0x%08X', $Value if $ReturnType eq 'SQLUINTEGER bitmask';
|
||||
return sprintf '0x%08X', $Value if $ReturnType eq 'SQLINTEGER bitmask';
|
||||
# return '"' . $Value . '"' if $ReturnType eq 'SQLCHAR';
|
||||
return $Value;
|
||||
}
|
||||
|
||||
|
||||
sub Explain {
|
||||
my $InfoType = shift;
|
||||
my $Value = shift;
|
||||
|
||||
return '' unless defined $Value;
|
||||
return '' unless exists $GetInfoReturnValues{$InfoType};
|
||||
|
||||
$Value = int $Value;
|
||||
my $ReturnType = $GetInfoReturnTypes{$InfoType};
|
||||
my %h = reverse %{$GetInfoReturnValues{$InfoType}};
|
||||
|
||||
if ( $ReturnType eq 'SQLUINTEGER bitmask'|| $ReturnType eq 'SQLINTEGER bitmask') {
|
||||
my @a = ();
|
||||
for my $k ( sort { $a <=> $b } keys %h ) {
|
||||
push @a, $h{$k} if $Value & $k;
|
||||
}
|
||||
return wantarray ? @a : join(' ', @a );
|
||||
}
|
||||
else {
|
||||
return $h{$Value} ||'?';
|
||||
}
|
||||
}
|
||||
|
||||
1;
|
|
@ -0,0 +1,53 @@
|
|||
# $Id: GetInfoType.pm 8696 2007-01-24 23:12:38Z Tim $
|
||||
#
|
||||
# Copyright (c) 2002 Tim Bunce Ireland
|
||||
#
|
||||
# Constant data describing info type codes for the DBI getinfo function.
|
||||
#
|
||||
# You may distribute under the terms of either the GNU General Public
|
||||
# License or the Artistic License, as specified in the Perl README file.
|
||||
|
||||
package DBI::Const::GetInfoType;
|
||||
|
||||
use strict;
|
||||
|
||||
use Exporter ();
|
||||
|
||||
use vars qw(@ISA @EXPORT @EXPORT_OK %GetInfoType);
|
||||
|
||||
@ISA = qw(Exporter);
|
||||
@EXPORT = qw(%GetInfoType);
|
||||
|
||||
my
|
||||
$VERSION = "2.008697";
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBI::Const::GetInfoType - Data describing GetInfo type codes
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
use DBI::Const::GetInfoType;
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
Imports a %GetInfoType hash which maps names for GetInfo Type Codes
|
||||
into their corresponding numeric values. For example:
|
||||
|
||||
$database_version = $dbh->get_info( $GetInfoType{SQL_DBMS_VER} );
|
||||
|
||||
The interface to this module is new and nothing beyond what is
|
||||
written here is guaranteed.
|
||||
|
||||
=cut
|
||||
|
||||
use DBI::Const::GetInfo::ANSI (); # liable to change
|
||||
use DBI::Const::GetInfo::ODBC (); # liable to change
|
||||
|
||||
%GetInfoType =
|
||||
(
|
||||
%DBI::Const::GetInfo::ANSI::InfoTypes # liable to change
|
||||
, %DBI::Const::GetInfo::ODBC::InfoTypes # liable to change
|
||||
);
|
||||
|
||||
1;
|
File diff suppressed because it is too large
|
@ -0,0 +1,492 @@
|
|||
package DBI::DBD::Metadata;
|
||||
|
||||
# $Id: Metadata.pm 14213 2010-06-30 19:29:18Z Martin $
|
||||
#
|
||||
# Copyright (c) 1997-2003 Jonathan Leffler, Jochen Wiedmann,
|
||||
# Steffen Goeldner and Tim Bunce
|
||||
#
|
||||
# You may distribute under the terms of either the GNU General Public
|
||||
# License or the Artistic License, as specified in the Perl README file.
|
||||
|
||||
use strict;
|
||||
|
||||
use Exporter ();
|
||||
use Carp;
|
||||
|
||||
use DBI;
|
||||
use DBI::Const::GetInfoType qw(%GetInfoType);
|
||||
|
||||
our @ISA = qw(Exporter);
|
||||
our @EXPORT = qw(write_getinfo_pm write_typeinfo_pm);
|
||||
|
||||
our $VERSION = "2.014214";
|
||||
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBI::DBD::Metadata - Generate the code and data for some DBI metadata methods
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
The idea is to extract metadata information from a good quality
|
||||
ODBC driver and use it to generate code and data to use in your own
|
||||
DBI driver for the same database.
|
||||
|
||||
To generate code to support the get_info method:
|
||||
|
||||
perl -MDBI::DBD::Metadata -e "write_getinfo_pm('dbi:ODBC:dsn-name','user','pass','Driver')"
|
||||
|
||||
perl -MDBI::DBD::Metadata -e write_getinfo_pm dbi:ODBC:foo_db username password Driver
|
||||
|
||||
To generate code to support the type_info method:
|
||||
|
||||
perl -MDBI::DBD::Metadata -e "write_typeinfo_pm('dbi:ODBC:dsn-name','user','pass','Driver')"
|
||||
|
||||
perl -MDBI::DBD::Metadata -e write_typeinfo_pm dbi:ODBC:dsn-name user pass Driver
|
||||
|
||||
Where C<dbi:ODBC:dsn-name> is the connection to use to extract the
|
||||
data, and C<Driver> is the name of the driver you want the code
|
||||
generated for (the driver name gets embedded into the output in
|
||||
numerous places).
|
||||
|
||||
=head1 Generating a GetInfo package for a driver
|
||||
|
||||
The C<write_getinfo_pm> in the DBI::DBD::Metadata module generates a
|
||||
DBD::Driver::GetInfo package on standard output.
|
||||
|
||||
This method generates a DBD::Driver::GetInfo package from the data
|
||||
source you specified in the parameter list or in the environment
|
||||
variable DBI_DSN.
|
||||
DBD::Driver::GetInfo should help a DBD author implement the DBI
|
||||
get_info() method.
|
||||
Because you are just creating this package, it is very unlikely that
|
||||
DBD::Driver already provides a good implementation for get_info().
|
||||
Thus you will probably connect via DBD::ODBC.
|
||||
|
||||
Once you are sure that it is producing reasonably sane data, you should
|
||||
typically redirect the standard output to lib/DBD/Driver/GetInfo.pm, and
|
||||
then hand edit the result.
|
||||
Do not forget to update your Makefile.PL and MANIFEST to include this as
|
||||
an extra PM file that should be installed.
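For example (the DSN, credentials and driver name are placeholders; adjust
them and the target path for your driver):

   perl -MDBI::DBD::Metadata -e "write_getinfo_pm('dbi:ODBC:dsn-name','user','pass','Driver')" > lib/DBD/Driver/GetInfo.pm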
|
||||
|
||||
If you connect via DBD::ODBC, you should use version 0.38 or greater.
|
||||
|
||||
Please take a critical look at the data returned!
|
||||
ODBC drivers vary dramatically in their quality.
|
||||
|
||||
The generator assumes that most values are static and places these
|
||||
values directly in the %info hash.
|
||||
A few examples show the use of CODE references and the implementation
|
||||
via subroutines.
|
||||
It is very likely that you will have to write additional subroutines for
|
||||
values depending on the session state or server version, e.g.
|
||||
SQL_DBMS_VER.
|
||||
|
||||
A possible implementation of DBD::Driver::db::get_info() may look like:
|
||||
|
||||
sub get_info {
|
||||
my($dbh, $info_type) = @_;
|
||||
require DBD::Driver::GetInfo;
|
||||
my $v = $DBD::Driver::GetInfo::info{int($info_type)};
|
||||
$v = $v->($dbh) if ref $v eq 'CODE';
|
||||
return $v;
|
||||
}
|
||||
|
||||
Please replace Driver (or "<foo>") with the name of your driver.
|
||||
Note that this stub function is generated for you by the write_getinfo_pm
function, but you must manually transfer the code to Driver.pm.
|
||||
|
||||
=cut
|
||||
|
||||
sub write_getinfo_pm
|
||||
{
|
||||
my ($dsn, $user, $pass, $driver) = @_ ? @_ : @ARGV;
|
||||
my $dbh = DBI->connect($dsn, $user, $pass, {RaiseError=>1});
|
||||
$driver = "<foo>" unless defined $driver;
|
||||
|
||||
print <<PERL;
|
||||
|
||||
# Transfer this to ${driver}.pm
|
||||
|
||||
# The get_info function was automatically generated by
|
||||
# DBI::DBD::Metadata::write_getinfo_pm v$DBI::DBD::Metadata::VERSION.
|
||||
|
||||
package DBD::${driver}::db; # This line can be removed once transferred.
|
||||
|
||||
sub get_info {
|
||||
my(\$dbh, \$info_type) = \@_;
|
||||
require DBD::${driver}::GetInfo;
|
||||
my \$v = \$DBD::${driver}::GetInfo::info{int(\$info_type)};
|
||||
\$v = \$v->(\$dbh) if ref \$v eq 'CODE';
|
||||
return \$v;
|
||||
}
|
||||
|
||||
# Transfer this to lib/DBD/${driver}/GetInfo.pm
|
||||
|
||||
# The \%info hash was automatically generated by
|
||||
# DBI::DBD::Metadata::write_getinfo_pm v$DBI::DBD::Metadata::VERSION.
|
||||
|
||||
package DBD::${driver}::GetInfo;
|
||||
|
||||
use strict;
|
||||
use DBD::${driver};
|
||||
|
||||
# Beware: not officially documented interfaces...
|
||||
# use DBI::Const::GetInfoType qw(\%GetInfoType);
|
||||
# use DBI::Const::GetInfoReturn qw(\%GetInfoReturnTypes \%GetInfoReturnValues);
|
||||
|
||||
my \$sql_driver = '${driver}';
|
||||
my \$sql_ver_fmt = '%02d.%02d.%04d'; # ODBC version string: ##.##.#####
|
||||
my \$sql_driver_ver = sprintf \$sql_ver_fmt, split (/\\./, \$DBD::${driver}::VERSION);
|
||||
PERL
|
||||
|
||||
my $kw_map = 0;
|
||||
{
|
||||
# Informix CLI (ODBC) v3.81.0000 does not return a list of keywords.
|
||||
local $\ = "\n";
|
||||
local $, = "\n";
|
||||
my ($kw) = $dbh->get_info($GetInfoType{SQL_KEYWORDS});
|
||||
if ($kw)
|
||||
{
|
||||
print "\nmy \@Keywords = qw(\n";
|
||||
print sort split /,/, $kw;
|
||||
print ");\n\n";
|
||||
print "sub sql_keywords {\n";
|
||||
print q% return join ',', @Keywords;%;
|
||||
print "\n}\n\n";
|
||||
$kw_map = 1;
|
||||
}
|
||||
}
|
||||
|
||||
print <<'PERL';
|
||||
|
||||
sub sql_data_source_name {
|
||||
my $dbh = shift;
|
||||
return "dbi:$sql_driver:" . $dbh->{Name};
|
||||
}
|
||||
|
||||
sub sql_user_name {
|
||||
my $dbh = shift;
|
||||
# CURRENT_USER is a non-standard attribute, probably undef
|
||||
# Username is a standard DBI attribute
|
||||
return $dbh->{CURRENT_USER} || $dbh->{Username};
|
||||
}
|
||||
|
||||
PERL
|
||||
|
||||
print "\nour \%info = (\n";
|
||||
foreach my $key (sort keys %GetInfoType)
|
||||
{
|
||||
my $num = $GetInfoType{$key};
|
||||
my $val = eval { $dbh->get_info($num); };
|
||||
if ($key eq 'SQL_DATA_SOURCE_NAME') {
|
||||
$val = '\&sql_data_source_name';
|
||||
}
|
||||
elsif ($key eq 'SQL_KEYWORDS') {
|
||||
$val = ($kw_map) ? '\&sql_keywords' : 'undef';
|
||||
}
|
||||
elsif ($key eq 'SQL_DRIVER_NAME') {
|
||||
$val = "\$INC{'DBD/$driver.pm'}";
|
||||
}
|
||||
elsif ($key eq 'SQL_DRIVER_VER') {
|
||||
$val = '$sql_driver_ver';
|
||||
}
|
||||
elsif ($key eq 'SQL_USER_NAME') {
|
||||
$val = '\&sql_user_name';
|
||||
}
|
||||
elsif (not defined $val) {
|
||||
$val = 'undef';
|
||||
}
|
||||
elsif ($val eq '') {
|
||||
$val = "''";
|
||||
}
|
||||
elsif ($val =~ /\D/) {
|
||||
$val =~ s/\\/\\\\/g;
|
||||
$val =~ s/'/\\'/g;
|
||||
$val = "'$val'";
|
||||
}
|
||||
printf "%s %5d => %-30s # %s\n", (($val eq 'undef') ? '#' : ' '), $num, "$val,", $key;
|
||||
}
|
||||
print ");\n\n1;\n\n__END__\n";
|
||||
}
|
||||
|
||||
|
||||
|
||||
=head1 Generating a TypeInfo package for a driver
|
||||
|
||||
The C<write_typeinfo_pm> function in the DBI::DBD::Metadata module generates
|
||||
on standard output the data needed for a driver's type_info_all method.
|
||||
It also provides default implementations of the type_info_all
|
||||
method for inclusion in the driver's main implementation file.
|
||||
|
||||
The driver parameter is the name of the driver for which the methods
|
||||
will be generated; for the sake of examples, this will be "Driver".
|
||||
Typically, the dsn parameter will be of the form "dbi:ODBC:odbc_dsn",
|
||||
where the odbc_dsn is a DSN for one of the driver's databases.
|
||||
The user and pass parameters are the other optional connection
|
||||
parameters that will be provided to the DBI connect method.
|
||||
|
||||
Once you are sure that it is producing reasonably sane data, you should
|
||||
typically redirect the standard output to lib/DBD/Driver/TypeInfo.pm,
|
||||
and then hand edit the result if necessary.
|
||||
Do not forget to update your Makefile.PL and MANIFEST to include this as
|
||||
an extra PM file that should be installed.
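For example (again with placeholder DSN, credentials and driver name):

   perl -MDBI::DBD::Metadata -e "write_typeinfo_pm('dbi:ODBC:dsn-name','user','pass','Driver')" > lib/DBD/Driver/TypeInfo.pm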
|
||||
|
||||
Please take a critical look at the data returned!
|
||||
ODBC drivers vary dramatically in their quality.
|
||||
|
||||
The generator assumes that all the values are static and places these
|
||||
values directly in the %info hash.
|
||||
|
||||
A possible implementation of DBD::Driver::type_info_all() may look like:
|
||||
|
||||
sub type_info_all {
|
||||
my ($dbh) = @_;
|
||||
require DBD::Driver::TypeInfo;
|
||||
return [ @$DBD::Driver::TypeInfo::type_info_all ];
|
||||
}
|
||||
|
||||
Please replace Driver (or "<foo>") with the name of your driver.
|
||||
Note that this stub function is generated for you by the write_typeinfo_pm
|
||||
function, but you must manually transfer the code to Driver.pm.
|
||||
|
||||
=cut
|
||||
|
||||
|
||||
# These two are used by fmt_value...
|
||||
my %dbi_inv;
|
||||
my %sql_type_inv;
|
||||
|
||||
#-DEBUGGING-#
|
||||
#sub print_hash
|
||||
#{
|
||||
# my ($name, %hash) = @_;
|
||||
# print "Hash: $name\n";
|
||||
# foreach my $key (keys %hash)
|
||||
# {
|
||||
# print "$key => $hash{$key}\n";
|
||||
# }
|
||||
#}
|
||||
#-DEBUGGING-#
|
||||
|
||||
sub inverse_hash
|
||||
{
|
||||
my (%hash) = @_;
|
||||
my (%inv);
|
||||
foreach my $key (keys %hash)
|
||||
{
|
||||
my $val = $hash{$key};
|
||||
die "Double mapping for key value $val ($inv{$val}, $key)!"
|
||||
if (defined $inv{$val});
|
||||
$inv{$val} = $key;
|
||||
}
|
||||
return %inv;
|
||||
}
|
||||
|
||||
sub fmt_value
|
||||
{
|
||||
my ($num, $val) = @_;
|
||||
if (!defined $val)
|
||||
{
|
||||
$val = "undef";
|
||||
}
|
||||
elsif ($val !~ m/^[-+]?\d+$/)
|
||||
{
|
||||
# All the numbers in type_info_all are integers!
|
||||
# Anything that isn't an integer is a string.
|
||||
# Ensure that no double quotes screw things up.
|
||||
$val =~ s/"/\\"/g if ($val =~ m/"/o);
|
||||
$val = qq{"$val"};
|
||||
}
|
||||
elsif ($dbi_inv{$num} =~ m/^(SQL_)?DATA_TYPE$/)
|
||||
{
|
||||
# All numeric...
|
||||
$val = $sql_type_inv{$val}
|
||||
if (defined $sql_type_inv{$val});
|
||||
}
|
||||
return $val;
|
||||
}
|
||||
|
||||
sub write_typeinfo_pm
|
||||
{
|
||||
my ($dsn, $user, $pass, $driver) = @_ ? @_ : @ARGV;
|
||||
my $dbh = DBI->connect($dsn, $user, $pass, {AutoCommit=>1, RaiseError=>1});
|
||||
$driver = "<foo>" unless defined $driver;
|
||||
|
||||
print <<PERL;
|
||||
|
||||
# Transfer this to ${driver}.pm
|
||||
|
||||
# The type_info_all function was automatically generated by
|
||||
# DBI::DBD::Metadata::write_typeinfo_pm v$DBI::DBD::Metadata::VERSION.
|
||||
|
||||
package DBD::${driver}::db; # This line can be removed once transferred.
|
||||
|
||||
sub type_info_all
|
||||
{
|
||||
my (\$dbh) = \@_;
|
||||
require DBD::${driver}::TypeInfo;
|
||||
return [ \@\$DBD::${driver}::TypeInfo::type_info_all ];
|
||||
}
|
||||
|
||||
# Transfer this to lib/DBD/${driver}/TypeInfo.pm.
|
||||
# Don't forget to add version and intellectual property control information.
|
||||
|
||||
# The \%type_info_all hash was automatically generated by
|
||||
# DBI::DBD::Metadata::write_typeinfo_pm v$DBI::DBD::Metadata::VERSION.
|
||||
|
||||
package DBD::${driver}::TypeInfo;
|
||||
|
||||
{
|
||||
require Exporter;
|
||||
require DynaLoader;
|
||||
\@ISA = qw(Exporter DynaLoader);
|
||||
\@EXPORT = qw(type_info_all);
|
||||
use DBI qw(:sql_types);
|
||||
|
||||
PERL
|
||||
|
||||
# Generate SQL type name mapping hashes.
|
||||
# See code fragment in DBI specification.
|
||||
my %sql_type_map;
|
||||
foreach (@{$DBI::EXPORT_TAGS{sql_types}})
|
||||
{
|
||||
no strict 'refs';
|
||||
$sql_type_map{$_} = &{"DBI::$_"}();
|
||||
$sql_type_inv{$sql_type_map{$_}} = $_;
|
||||
}
|
||||
#-DEBUG-# print_hash("sql_type_map", %sql_type_map);
|
||||
#-DEBUG-# print_hash("sql_type_inv", %sql_type_inv);
|
||||
|
||||
my %dbi_map =
|
||||
(
|
||||
TYPE_NAME => 0,
|
||||
DATA_TYPE => 1,
|
||||
COLUMN_SIZE => 2,
|
||||
LITERAL_PREFIX => 3,
|
||||
LITERAL_SUFFIX => 4,
|
||||
CREATE_PARAMS => 5,
|
||||
NULLABLE => 6,
|
||||
CASE_SENSITIVE => 7,
|
||||
SEARCHABLE => 8,
|
||||
UNSIGNED_ATTRIBUTE => 9,
|
||||
FIXED_PREC_SCALE => 10,
|
||||
AUTO_UNIQUE_VALUE => 11,
|
||||
LOCAL_TYPE_NAME => 12,
|
||||
MINIMUM_SCALE => 13,
|
||||
MAXIMUM_SCALE => 14,
|
||||
SQL_DATA_TYPE => 15,
|
||||
SQL_DATETIME_SUB => 16,
|
||||
NUM_PREC_RADIX => 17,
|
||||
INTERVAL_PRECISION => 18,
|
||||
);
|
||||
|
||||
#-DEBUG-# print_hash("dbi_map", %dbi_map);
|
||||
|
||||
%dbi_inv = inverse_hash(%dbi_map);
|
||||
|
||||
#-DEBUG-# print_hash("dbi_inv", %dbi_inv);
|
||||
|
||||
my $maxlen = 0;
|
||||
foreach my $key (keys %dbi_map)
|
||||
{
|
||||
$maxlen = length($key) if length($key) > $maxlen;
|
||||
}
|
||||
|
||||
# Print the name/value mapping entry in the type_info_all array;
|
||||
my $fmt = " \%-${maxlen}s => \%2d,\n";
|
||||
my $numkey = 0;
|
||||
my $maxkey = 0;
|
||||
print " \$type_info_all = [\n {\n";
|
||||
foreach my $i (sort { $a <=> $b } keys %dbi_inv)
|
||||
{
|
||||
printf($fmt, $dbi_inv{$i}, $i);
|
||||
$numkey++;
|
||||
$maxkey = $i;
|
||||
}
|
||||
print " },\n";
|
||||
|
||||
print STDERR "### WARNING - Non-dense set of keys ($numkey keys, $maxkey max key)\n"
|
||||
unless $numkey == $maxkey + 1;
|
||||
|
||||
my $h = $dbh->type_info_all;
|
||||
my @tia = @$h;
|
||||
my %odbc_map = map { uc $_ => $tia[0]->{$_} } keys %{$tia[0]};
|
||||
shift @tia; # Remove the mapping reference.
|
||||
my $numtyp = $#tia;
|
||||
|
||||
#-DEBUG-# print_hash("odbc_map", %odbc_map);
|
||||
|
||||
# In theory, the key/number mapping sequence for %dbi_map
|
||||
# should be the same as the one from the ODBC driver. However, to
|
||||
# prevent the possibility of mismatches, and to deal with older
|
||||
# missing attributes or unexpected new ones, we chase back through
|
||||
# the %dbi_inv and %odbc_map hashes, generating @dbi_to_odbc
|
||||
# to map our new key number to the old one.
|
||||
# Report if @dbi_to_odbc is not an identity mapping.
|
||||
my @dbi_to_odbc;
|
||||
foreach my $num (sort { $a <=> $b } keys %dbi_inv)
|
||||
{
|
||||
# Find the name in %dbi_inv that matches this index number.
|
||||
my $dbi_key = $dbi_inv{$num};
|
||||
#-DEBUG-# print "dbi_key = $dbi_key\n";
|
||||
#-DEBUG-# print "odbc_key = $odbc_map{$dbi_key}\n";
|
||||
# Find the index in %odbc_map that has this key.
|
||||
$dbi_to_odbc[$num] = (defined $odbc_map{$dbi_key}) ? $odbc_map{$dbi_key} : undef;
|
||||
}
|
||||
|
||||
# Determine the length of the longest formatted value in each field
|
||||
my @len;
|
||||
for (my $i = 0; $i <= $numtyp; $i++)
|
||||
{
|
||||
my @odbc_val = @{$tia[$i]};
|
||||
for (my $num = 0; $num <= $maxkey; $num++)
|
||||
{
|
||||
# Find the value of the entry in the @odbc_val array.
|
||||
my $val = (defined $dbi_to_odbc[$num]) ? $odbc_val[$dbi_to_odbc[$num]] : undef;
|
||||
$val = fmt_value($num, $val);
|
||||
#-DEBUG-# print "val = $val\n";
|
||||
$val = "$val,";
|
||||
$len[$num] = length($val) if !defined $len[$num] || length($val) > $len[$num];
|
||||
}
|
||||
}
|
||||
|
||||
# Generate format strings to left justify each string in maximum field width.
|
||||
my @fmt;
|
||||
for (my $i = 0; $i <= $maxkey; $i++)
|
||||
{
|
||||
$fmt[$i] = "%-$len[$i]s";
|
||||
#-DEBUG-# print "fmt[$i] = $fmt[$i]\n";
|
||||
}
|
||||
|
||||
# Format the data from type_info_all
|
||||
for (my $i = 0; $i <= $numtyp; $i++)
|
||||
{
|
||||
my @odbc_val = @{$tia[$i]};
|
||||
print " [ ";
|
||||
for (my $num = 0; $num <= $maxkey; $num++)
|
||||
{
|
||||
# Find the value of the entry in the @odbc_val array.
|
||||
my $val = (defined $dbi_to_odbc[$num]) ? $odbc_val[$dbi_to_odbc[$num]] : undef;
|
||||
$val = fmt_value($num, $val);
|
||||
printf $fmt[$num], "$val,";
|
||||
}
|
||||
print " ],\n";
|
||||
}
|
||||
|
||||
print " ];\n\n 1;\n}\n\n__END__\n";
|
||||
|
||||
}
|
||||
|
||||
1;
|
||||
|
||||
__END__
|
||||
|
||||
=head1 AUTHORS
|
||||
|
||||
Jonathan Leffler <jleffler@us.ibm.com> (previously <jleffler@informix.com>),
|
||||
Jochen Wiedmann <joe@ispsoft.de>,
|
||||
Steffen Goeldner <sgoeldner@cpan.org>,
|
||||
and Tim Bunce <dbi-users@perl.org>.
|
||||
|
||||
=cut
|
File diff suppressed because it is too large
|
@ -0,0 +1,851 @@
|
|||
=head1 NAME
|
||||
|
||||
DBI::DBD::SqlEngine::Developers - Developers documentation for DBI::DBD::SqlEngine
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
package DBD::myDriver;
|
||||
|
||||
use base qw(DBI::DBD::SqlEngine);
|
||||
|
||||
sub driver
|
||||
{
|
||||
...
|
||||
my $drh = $proto->SUPER::driver($attr);
|
||||
...
|
||||
return $drh->{class};
|
||||
}
|
||||
|
||||
sub CLONE { ... }
|
||||
|
||||
package DBD::myDriver::dr;
|
||||
|
||||
@ISA = qw(DBI::DBD::SqlEngine::dr);
|
||||
|
||||
sub data_sources { ... }
|
||||
...
|
||||
|
||||
package DBD::myDriver::db;
|
||||
|
||||
@ISA = qw(DBI::DBD::SqlEngine::db);
|
||||
|
||||
sub init_valid_attributes { ... }
|
||||
sub init_default_attributes { ... }
|
||||
sub set_versions { ... }
|
||||
sub validate_STORE_attr { my ($dbh, $attrib, $value) = @_; ... }
|
||||
sub validate_FETCH_attr { my ($dbh, $attrib) = @_; ... }
|
||||
sub get_myd_versions { ... }
|
||||
sub get_avail_tables { ... }
|
||||
|
||||
package DBD::myDriver::st;
|
||||
|
||||
@ISA = qw(DBI::DBD::SqlEngine::st);
|
||||
|
||||
sub FETCH { ... }
|
||||
sub STORE { ... }
|
||||
|
||||
package DBD::myDriver::Statement;
|
||||
|
||||
@ISA = qw(DBI::DBD::SqlEngine::Statement);
|
||||
|
||||
sub open_table { ... }
|
||||
|
||||
package DBD::myDriver::Table;
|
||||
|
||||
@ISA = qw(DBI::DBD::SqlEngine::Table);
|
||||
|
||||
my %reset_on_modify = (
|
||||
myd_abc => "myd_foo",
|
||||
myd_mno => "myd_bar",
|
||||
);
|
||||
__PACKAGE__->register_reset_on_modify( \%reset_on_modify );
|
||||
my %compat_map = (
|
||||
abc => 'foo_abc',
|
||||
xyz => 'foo_xyz',
|
||||
);
|
||||
__PACKAGE__->register_compat_map( \%compat_map );
|
||||
|
||||
sub bootstrap_table_meta { ... }
|
||||
sub init_table_meta { ... }
|
||||
sub table_meta_attr_changed { ... }
|
||||
sub open_data { ... }
|
||||
|
||||
sub new { ... }
|
||||
|
||||
sub fetch_row { ... }
|
||||
sub push_row { ... }
|
||||
sub push_names { ... }
|
||||
sub seek { ... }
|
||||
sub truncate { ... }
|
||||
sub drop { ... }
|
||||
|
||||
# optimize the SQL engine by adding one or more of
|
||||
sub update_current_row { ... }
|
||||
# or
|
||||
sub update_specific_row { ... }
|
||||
# or
|
||||
sub update_one_row { ... }
|
||||
# or
|
||||
sub insert_new_row { ... }
|
||||
# or
|
||||
sub delete_current_row { ... }
|
||||
# or
|
||||
sub delete_one_row { ... }
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
This document describes the interface of DBI::DBD::SqlEngine for DBD
|
||||
developers who write DBI::DBD::SqlEngine based DBI drivers. It supplements
|
||||
L<DBI::DBD> and L<DBI::DBD::SqlEngine::HowTo>, which you should read first.
|
||||
|
||||
=head1 CLASSES
|
||||
|
||||
Each DBI driver must provide a package global C<< driver >> method and
|
||||
three DBI related classes:
|
||||
|
||||
=over 4
|
||||
|
||||
=item DBI::DBD::SqlEngine::dr
|
||||
|
||||
Driver package, contains the methods DBI calls indirectly via DBI
|
||||
interface:
|
||||
|
||||
DBI->connect ('DBI:DBM:', undef, undef, {})
|
||||
|
||||
# invokes
|
||||
package DBD::DBM::dr;
|
||||
@DBD::DBM::dr::ISA = qw(DBI::DBD::SqlEngine::dr);
|
||||
|
||||
sub connect ($$;$$$)
|
||||
{
|
||||
...
|
||||
}
|
||||
|
||||
Similar for C<data_sources ()> and C<disconnect_all()>.
|
||||
|
||||
Pure Perl DBI drivers derived from DBI::DBD::SqlEngine usually don't need to
|
||||
override any of the methods provided through the DBD::XXX::dr package.
|
||||
However, if you need additional initialization not fitting in
C<init_valid_attributes()> and C<init_default_attributes()> of your ::db
class, the connect method might be the final place to be modified.
|
||||
|
||||
=item DBI::DBD::SqlEngine::db
|
||||
|
||||
Contains the methods which are called through DBI database handles
|
||||
(C<< $dbh >>). e.g.,
|
||||
|
||||
$sth = $dbh->prepare ("select * from foo");
|
||||
# returns the f_encoding setting for table foo
|
||||
$dbh->csv_get_meta ("foo", "f_encoding");
|
||||
|
||||
DBI::DBD::SqlEngine provides the typical methods required here. Developers who
|
||||
write DBI drivers based on DBI::DBD::SqlEngine need to override the methods
|
||||
C<< set_versions >> and C<< init_valid_attributes >>.
|
||||
|
||||
=item DBI::DBD::SqlEngine::TieMeta;
|
||||
|
||||
Provides the tie-magic for C<< $dbh->{$drv_pfx . "_meta"} >>. Routes
|
||||
C<STORE> through C<< $drv->set_sql_engine_meta() >> and C<FETCH> through
|
||||
C<< $drv->get_sql_engine_meta() >>. C<DELETE> is not supported, you have
|
||||
to execute a C<DROP TABLE> statement, where applicable.
|
||||
|
||||
=item DBI::DBD::SqlEngine::TieTables;
|
||||
|
||||
Provides the tie-magic for tables in C<< $dbh->{$drv_pfx . "_meta"} >>.
|
||||
Routes C<STORE> though C<< $tblClass->set_table_meta_attr() >> and C<FETCH>
|
||||
though C<< $tblClass->get_table_meta_attr() >>. C<DELETE> removes an
|
||||
attribute from the I<meta object> retrieved by
|
||||
C<< $tblClass->get_table_meta() >>.
|
||||
|
||||
=item DBI::DBD::SqlEngine::st
|
||||
|
||||
Contains the methods to deal with prepared statement handles. e.g.,
|
||||
|
||||
$sth->execute () or die $sth->errstr;
|
||||
|
||||
=item DBI::DBD::SqlEngine::TableSource;
|
||||
|
||||
Base class for 3rd party table sources:
|
||||
|
||||
$dbh->{sql_table_source} = "DBD::Foo::TableSource";
|
||||
|
||||
=item DBI::DBD::SqlEngine::DataSource;
|
||||
|
||||
Base class for 3rd party data sources:
|
||||
|
||||
$dbh->{sql_data_source} = "DBD::Foo::DataSource";
|
||||
|
||||
=item DBI::DBD::SqlEngine::Statement;
|
||||
|
||||
Base class for a derived driver's statement engine. Implements C<open_table>.
|
||||
|
||||
=item DBI::DBD::SqlEngine::Table;
|
||||
|
||||
Contains tailoring between SQL engine's requirements and
|
||||
C<DBI::DBD::SqlEngine> magic for finding the right tables and storage.
|
||||
Builds bridges between C<sql_meta> handling of C<DBI::DBD::SqlEngine::db>,
|
||||
table initialization for SQL engines and I<meta object>'s attribute
|
||||
management for derived drivers.
|
||||
|
||||
=back
|
||||
|
||||
=head2 DBI::DBD::SqlEngine
|
||||
|
||||
This is the main package containing the routines to initialize
|
||||
DBI::DBD::SqlEngine based DBI drivers. Primarily the
|
||||
C<< DBI::DBD::SqlEngine::driver >> method is invoked, either directly
|
||||
from DBI when the driver is initialized or from the derived class.
|
||||
|
||||
package DBD::DBM;
|
||||
|
||||
use base qw( DBI::DBD::SqlEngine );
|
||||
|
||||
sub driver
|
||||
{
|
||||
my ( $class, $attr ) = @_;
|
||||
...
|
||||
my $drh = $class->SUPER::driver( $attr );
|
||||
...
|
||||
return $drh;
|
||||
}
|
||||
|
||||
It is not necessary to implement your own driver method as long as
|
||||
additional initialization (e.g. installing more private driver
|
||||
methods) is not required. You do not need to call C<< setup_driver >>
|
||||
as DBI::DBD::SqlEngine takes care of it.
|
||||
|
||||
=head2 DBI::DBD::SqlEngine::dr
|
||||
|
||||
The driver package contains the methods DBI calls indirectly via the DBI
|
||||
interface (see L<DBI/DBI Class Methods>).
|
||||
|
||||
DBI::DBD::SqlEngine based DBI drivers usually do not need to implement anything here;
it is enough to do the basic initialization:
|
||||
|
||||
package DBD::XXX::dr;
|
||||
|
||||
@DBD::XXX::dr::ISA = qw (DBI::DBD::SqlEngine::dr);
|
||||
$DBD::XXX::dr::imp_data_size = 0;
|
||||
$DBD::XXX::dr::data_sources_attr = undef;
|
||||
$DBD::XXX::ATTRIBUTION = "DBD::XXX $DBD::XXX::VERSION by Hans Mustermann";
|
||||
|
||||
=head3 Methods provided by C<< DBI::DBD::SqlEngine::dr >>:
|
||||
|
||||
=over 4
|
||||
|
||||
=item connect
|
||||
|
||||
Supervises the driver bootstrap when calling
|
||||
|
||||
DBI->connect( "dbi:Foo", , , { ... } );
|
||||
|
||||
First it instantiates a new driver using C<DBI::_new_dbh>. After that,
|
||||
initial bootstrap of the newly instantiated driver is done by
|
||||
|
||||
$dbh->func( 0, "init_default_attributes" );
|
||||
|
||||
The first argument (C<0>) signals that this is the very first call to
|
||||
C<init_default_attributes>. Modern drivers understand that and do early
|
||||
stage setup here after calling
|
||||
|
||||
package DBD::Foo::db;
|
||||
our @DBD::Foo::db::ISA = qw(DBI::DBD::SqlEngine::db);
|
||||
|
||||
sub init_default_attributes
|
||||
{
|
||||
my ($dbh, $phase) = @_;
|
||||
$dbh->SUPER::init_default_attributes($phase);
|
||||
...; # own setup code, maybe separated by phases
|
||||
}
|
||||
|
||||
When the C<$phase> argument is passed all the way down to
|
||||
C<DBI::DBD::SqlEngine::db::init_default_attributes>, C<connect()> recognizes
|
||||
a I<modern> driver and initializes the attributes from I<DSN> and I<$attr>
|
||||
arguments passed via C<< DBI->connect( $dsn, $user, $pass, \%attr ) >>.
|
||||
|
||||
At the end of the attribute initialization after I<phase 0>, C<connect()>
|
||||
invokes C<init_default_attributes> again for I<phase 1>:
|
||||
|
||||
$dbh->func( 1, "init_default_attributes" );
|
||||
|
||||
=item data_sources
|
||||
|
||||
Returns a list of I<DSN>'s using the C<data_sources> method of the
|
||||
class specified in C<< $dbh->{sql_table_source} >> or via C<\%attr>:
|
||||
|
||||
@ary = DBI->data_sources($driver);
|
||||
@ary = DBI->data_sources($driver, \%attr);
|
||||
|
||||
=item disconnect_all
|
||||
|
||||
C<DBI::DBD::SqlEngine> doesn't have an overall driver cache, so nothing
|
||||
happens here at all.
|
||||
|
||||
=back
|
||||
|
||||
=head2 DBI::DBD::SqlEngine::db
|
||||
|
||||
This package defines the database methods, which are called via the DBI
|
||||
database handle C<< $dbh >>.
|
||||
|
||||
=head3 Methods provided by C<< DBI::DBD::SqlEngine::db >>:
|
||||
|
||||
=over 4
|
||||
|
||||
=item ping
|
||||
|
||||
Simply returns the content of the C<< Active >> attribute. Override
|
||||
when your driver needs more complicated actions here.
|
||||
|
||||
=item prepare
|
||||
|
||||
Prepares a new SQL statement to execute. Returns a statement handle,
C<< $sth >> - an instance of DBD::XXX::st. It is neither required nor
recommended to override this method.
|
||||
|
||||
=item validate_FETCH_attr
|
||||
|
||||
Called by C<FETCH> to allow inherited drivers to do their own attribute
name validation. The calling convention is similar to C<FETCH> and the
|
||||
return value is the approved attribute name.
|
||||
|
||||
return $validated_attribute_name;
|
||||
|
||||
In case validation fails (e.g. when accessing a private attribute or similar),
C<validate_FETCH_attr> is permitted to throw an exception.
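A derived driver might, for instance, map a shorthand attribute name onto
its prefixed private attribute before handing validation back to the base
class (a sketch only; the C<foo_> prefix and the attribute names are
illustrative):

  package DBD::Foo::db;

  sub validate_FETCH_attr
  {
      my ($dbh, $attrib) = @_;
      # expose the private foo_version attribute also as "version"
      $attrib = "foo_version" if $attrib eq "version";
      return $dbh->SUPER::validate_FETCH_attr ($attrib);
  }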
|
||||
|
||||
=item FETCH
|
||||
|
||||
Fetches an attribute of a DBI database object. Private handle attributes
|
||||
must have a prefix (this is mandatory). If a requested attribute is
|
||||
detected as a private attribute without a valid prefix, the driver prefix
|
||||
(written as C<$drv_prefix>) is added.
|
||||
|
||||
The driver prefix is extracted from the attribute name and verified against
|
||||
C<< $dbh->{ $drv_prefix . "valid_attrs" } >> (when it exists). If the
|
||||
requested attribute value is not listed as a valid attribute, this method
|
||||
croaks. If the attribute is valid and readonly (listed in C<< $dbh->{
|
||||
$drv_prefix . "readonly_attrs" } >> when it exists), a real copy of the
|
||||
attribute value is returned. So it's not possible to modify
|
||||
C<f_valid_attrs> from outside of DBI::DBD::SqlEngine::db or a derived class.
|
||||
|
||||
=item validate_STORE_attr
|
||||
|
||||
Called by C<STORE> to allow inherited drivers to do their own attribute
name validation. The calling convention is similar to C<STORE> and the
|
||||
return value is the approved attribute name followed by the approved
|
||||
new value.
|
||||
|
||||
return ($validated_attribute_name, $validated_attribute_value);
|
||||
|
||||
In case validation fails (e.g. when accessing a private attribute or similar),
C<validate_STORE_attr> is permitted to throw an exception
(C<DBI::DBD::SqlEngine::db::validate_STORE_attr> throws an exception when
someone tries to assign a value other than C<SQL_IC_UPPER .. SQL_IC_MIXED>
|
||||
to C<< $dbh->{sql_identifier_case} >> or
|
||||
C<< $dbh->{sql_quoted_identifier_case} >>).
|
||||
|
||||
=item STORE
|
||||
|
||||
Stores a database private attribute. Private handle attributes must have a
|
||||
prefix (this is mandatory). If a requested attribute is detected as a private
|
||||
attribute without a valid prefix, the driver prefix (written as
|
||||
C<$drv_prefix>) is added. If the database handle has an attribute
|
||||
C<${drv_prefix}_valid_attrs> - for attribute names which are not listed in
|
||||
that hash, this method croaks. If the database handle has an attribute
|
||||
C<${drv_prefix}_readonly_attrs>, only attributes which are not listed there
|
||||
can be stored (once they are initialized). Trying to overwrite such an
|
||||
immutable attribute forces this method to croak.
|
||||
|
||||
An example of a valid attributes list can be found in
|
||||
C<< DBI::DBD::SqlEngine::db::init_valid_attributes >>.
|
||||
|
||||
=item set_versions
|
||||
|
||||
This method sets the attributes C<< f_version >>, C<< sql_nano_version >>,
|
||||
C<< sql_statement_version >> and (if not prohibited by a restrictive
|
||||
C<< ${prefix}_valid_attrs >>) C<< ${prefix}_version >>.
|
||||
|
||||
This method is called at the end of the C<< connect () >> phase.
|
||||
|
||||
When overriding this method, do not forget to invoke the superior one.
|
||||
|
||||
=item init_valid_attributes
|
||||
|
||||
This method is called after the database handle is instantiated as the
|
||||
first attribute initialization.
|
||||
|
||||
C<< DBI::DBD::SqlEngine::db::init_valid_attributes >> initializes the
|
||||
attributes C<sql_valid_attrs> and C<sql_readonly_attrs>.
|
||||
|
||||
When overriding this method, do not forget to invoke the superior one,
|
||||
preferably before doing anything else.
|
||||
|
||||
=item init_default_attributes
|
||||
|
||||
This method is called after the database handle is instantiated to
|
||||
initialize the default attributes. It expects one argument: C<$phase>.
|
||||
If C<$phase> is not given, C<connect> of C<DBI::DBD::SqlEngine::dr>
|
||||
expects this is an old-fashioned driver which isn't capable of multi-phased
|
||||
initialization.
|
||||
|
||||
C<< DBI::DBD::SqlEngine::db::init_default_attributes >> initializes the
|
||||
attributes C<sql_identifier_case>, C<sql_quoted_identifier_case>,
|
||||
C<sql_handler>, C<sql_init_order>, C<sql_meta>, C<sql_engine_version>,
|
||||
C<sql_nano_version> and C<sql_statement_version> when L<SQL::Statement>
|
||||
is available.
|
||||
|
||||
It sets C<sql_init_order> to the given C<$phase>.
|
||||
|
||||
When the derived implementor class provides the attribute to validate
|
||||
attributes (e.g. C<< $dbh->{dbm_valid_attrs} = {...}; >>) or the attribute
|
||||
containing the immutable attributes (e.g. C<< $dbh->{dbm_readonly_attrs}
|
||||
= {...}; >>), the attributes C<drv_valid_attrs>, C<drv_readonly_attrs> and
|
||||
C<drv_version> are added (when available) to the list of valid and
|
||||
immutable attributes (where C<drv_> is interpreted as the driver prefix).
|
||||
|
||||
=item get_versions
|
||||
|
||||
This method is called by the code injected into the instantiated driver to
|
||||
provide the user callable driver method C<< ${prefix}versions >> (e.g.
|
||||
C<< dbm_versions >>, C<< csv_versions >>, ...).
|
||||
|
||||
The DBI::DBD::SqlEngine implementation returns all version information known by
|
||||
DBI::DBD::SqlEngine (e.g. DBI version, Perl version, DBI::DBD::SqlEngine version and
|
||||
the SQL handler version).
|
||||
|
||||
C<get_versions> takes the C<$dbh> as the first argument and optionally a
|
||||
second argument containing a table name. The second argument is not
|
||||
evaluated in C<< DBI::DBD::SqlEngine::db::get_versions >> itself - but
|
||||
might be in the future.
|
||||
|
||||
If the derived implementor class provides a method named
C<get_${drv_prefix}versions>, it is invoked and its return value is
associated with the derived driver name:
|
||||
|
||||
if (my $dgv = $dbh->{ImplementorClass}->can ("get_" . $drv_prefix . "versions")) {
|
||||
(my $derived_driver = $dbh->{ImplementorClass}) =~ s/::db$//;
|
||||
$versions{$derived_driver} = &$dgv ($dbh, $table);
|
||||
}
|
||||
|
||||
Override it to add more version information about your module (e.g.
some kind of parser version in the case of DBD::CSV, ...) if one line does
not provide enough room for all relevant information.
|
||||
|
||||
=item sql_parser_object
|
||||
|
||||
Returns a L<SQL::Parser> instance, when C<< sql_handler >> is set to
|
||||
"SQL::Statement". The parser instance is stored in C<< sql_parser_object >>.
|
||||
|
||||
It is not recommended to override this method.
|
||||
|
||||
=item disconnect
|
||||
|
||||
Disconnects from a database. All local table information is discarded and
|
||||
the C<< Active >> attribute is set to 0.
|
||||
|
||||
=item type_info_all
|
||||
|
||||
Returns information about all the types supported by DBI::DBD::SqlEngine.
|
||||
|
||||
=item table_info
|
||||
|
||||
Returns a statement handle which is prepared to deliver information about
|
||||
all known tables.
|
||||
|
||||
=item list_tables
|
||||
|
||||
Returns a list of all known table names.
|
||||
|
||||
=item quote
|
||||
|
||||
Quotes a string for use in SQL statements.
|
||||
|
||||
=item commit
|
||||
|
||||
Warns about a useless call (if warnings enabled) and returns.
|
||||
DBI::DBD::SqlEngine is typically a driver which commits every action
|
||||
instantly when executed.
|
||||
|
||||
=item rollback
|
||||
|
||||
Warns about a useless call (if warnings enabled) and returns.
|
||||
DBI::DBD::SqlEngine is typically a driver which commits every action
|
||||
instantly when executed.
|
||||
|
||||
=back
|
||||
|
||||
=head3 Attributes used by C<< DBI::DBD::SqlEngine::db >>:
|
||||
|
||||
This section describes attributes which are important to developers of DBI
|
||||
Database Drivers derived from C<DBI::DBD::SqlEngine>.
|
||||
|
||||
=over 4
|
||||
|
||||
=item sql_init_order
|
||||
|
||||
This attribute contains a hash with priorities as keys and an array
containing the C<$dbh> attributes to be initialized before/after
other attributes.
|
||||
|
||||
C<DBI::DBD::SqlEngine> initializes following attributes:
|
||||
|
||||
$dbh->{sql_init_order} = {
|
||||
0 => [qw( Profile RaiseError PrintError AutoCommit )],
|
||||
90 => [ "sql_meta", $dbh->{$drv_pfx_meta} ? $dbh->{$drv_pfx_meta} : () ]
|
||||
}
|
||||
|
||||
The default priority for attribute keys not listed here is C<50>. It is well
known that a lot of attributes need to be set before some table settings
are initialized. For example, for L<DBD::DBM>, when using
|
||||
|
||||
my $dbh = DBI->connect( "dbi:DBM:", undef, undef, {
|
||||
f_dir => "/path/to/dbm/databases",
|
||||
dbm_type => "BerkeleyDB",
|
||||
dbm_mldbm => "JSON", # use MLDBM::Serializer::JSON
|
||||
dbm_tables => {
|
||||
quick => {
|
||||
dbm_type => "GDBM_File",
|
||||
dbm_MLDBM => "FreezeThaw"
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
This defines a known table C<quick> which uses the L<GDBM_File> backend and
|
||||
L<FreezeThaw> as serializer instead of the overall default L<BerkeleyDB> and
|
||||
L<JSON>. B<But> all files containing the table data have to be searched in
C<< $dbh->{f_dir} >>, which requires that C<< $dbh->{f_dir} >> be initialized
before C<< $dbh->{sql_meta}->{quick} >> is initialized by the
C<bootstrap_table_meta> method of L</DBI::DBD::SqlEngine::Table>, so that
C<< $dbh->{sql_meta}->{quick}->{f_dir} >> is initialized properly.
|
||||
|
||||
=item sql_init_phase
|
||||
|
||||
This attribute is only set during the initialization steps of the DBI
|
||||
Database Driver. It contains the value of the currently run initialization
|
||||
phase. Currently supported phases are I<phase 0> and I<phase 1>. This
|
||||
attribute is set in C<init_default_attributes> and removed in C<init_done>.
|
||||
|
||||
=item sql_engine_in_gofer
|
||||
|
||||
This value is true when this driver is operated via
L<DBD::Gofer>. The impact of being operated via Gofer is a read-only
|
||||
driver (not read-only databases!), so you cannot modify any attributes
|
||||
later - nor any table settings. B<But> you won't get an error in
case you modify table attributes, so please carefully watch
|
||||
C<sql_engine_in_gofer>.
|
||||
|
||||
=item sql_table_source
|
||||
|
||||
Names a class which is responsible for delivering I<data sources> and
|
||||
I<available tables> (Database Driver related). I<data sources> here
|
||||
refers to L<DBI/data_sources>, not C<sql_data_source>.
|
||||
|
||||
See L</DBI::DBD::SqlEngine::TableSource> for details.
|
||||
|
||||
=item sql_data_source
|
||||
|
||||
Names a class which is responsible for opening table resources
and completing table names requested via SQL statements.
|
||||
|
||||
See L</DBI::DBD::SqlEngine::DataSource> for details.
|
||||
|
||||
=item sql_dialect
|
||||
|
||||
Controls the dialect understood by SQL::Parser. Possible values (delivery
|
||||
state of SQL::Statement):
|
||||
|
||||
* ANSI
|
||||
* CSV
|
||||
* AnyData
|
||||
|
||||
Defaults to "CSV". Because an SQL::Parser is instantiated only once and
|
||||
SQL::Parser doesn't allow one to modify the dialect once instantiated,
|
||||
it's strongly recommended to set this flag before any statement is
executed (the best place is the connect attribute hash, as shown below).
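This sketch uses a L<DBD::CSV> handle; any DBI::DBD::SqlEngine based
driver is configured the same way:

  my $dbh = DBI->connect ("dbi:CSV:", undef, undef, {
      sql_dialect => "ANSI",
      });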
|
||||
|
||||
=back
|
||||
|
||||
=head2 DBI::DBD::SqlEngine::st
|
||||
|
||||
Contains the methods to deal with prepared statement handles:
|
||||
|
||||
=over 4
|
||||
|
||||
=item bind_param
|
||||
|
||||
Common routine to bind placeholders to a statement for execution. It
|
||||
is dangerous to override this method without detailed knowledge about
|
||||
the DBI::DBD::SqlEngine internal storage structure.
|
||||
|
||||
=item execute
|
||||
|
||||
Executes a previously prepared statement (with placeholders, if any).
|
||||
|
||||
=item finish
|
||||
|
||||
Finishes a statement handle, discards all buffered results. The prepared
|
||||
statement is not discarded so the statement can be executed again.
|
||||
|
||||
=item fetch
|
||||
|
||||
Fetches the next row from the result-set. This method may be rewritten
|
||||
in a later version and if it's overridden in a derived class, the
|
||||
derived implementation should not rely on the storage details.
|
||||
|
||||
=item fetchrow_arrayref
|
||||
|
||||
Alias for C<< fetch >>.
|
||||
|
||||
=item FETCH
|
||||
|
||||
Fetches statement handle attributes. Supported attributes (for full overview
|
||||
see L<DBI/Statement Handle Attributes>) are C<NAME>, C<TYPE>, C<PRECISION>
|
||||
and C<NULLABLE>. Each column is returned as C<NULLABLE> which might be wrong
|
||||
depending on the derived backend storage. If the statement handle has
|
||||
private attributes, they can be fetched using this method, too. B<Note> that
|
||||
statement attributes are not associated with any table used in this statement.
|
||||
|
||||
This method usually requires extending in a derived implementation.
See L<DBD::CSV> or L<DBD::DBM> for examples.
|
||||
|
||||
=item STORE
|
||||
|
||||
Allows storing of statement private attributes. No special handling is
|
||||
currently implemented here.
|
||||
|
||||
=item rows
|
||||
|
||||
Returns the number of rows affected by the last execute. This method might
|
||||
return C<undef>.
|
||||
|
||||
=back
|
||||
|
||||
=head2 DBI::DBD::SqlEngine::TableSource
|
||||
|
||||
Provides data sources and table information on database driver and database
|
||||
handle level.
|
||||
|
||||
package DBI::DBD::SqlEngine::TableSource;
|
||||
|
||||
sub data_sources ($;$)
|
||||
{
|
||||
my ( $class, $drh, $attrs ) = @_;
|
||||
...
|
||||
}
|
||||
|
||||
sub avail_tables
|
||||
{
|
||||
my ( $class, $drh ) = @_;
|
||||
...
|
||||
}
|
||||
|
||||
The C<data_sources> method is called when the user invokes any of the
|
||||
following:
|
||||
|
||||
@ary = DBI->data_sources($driver);
|
||||
@ary = DBI->data_sources($driver, \%attr);
|
||||
|
||||
@ary = $dbh->data_sources();
|
||||
@ary = $dbh->data_sources(\%attr);
|
||||
|
||||
The C<avail_tables> method is called when the user invokes any of the
|
||||
following:
|
||||
|
||||
@names = $dbh->tables( $catalog, $schema, $table, $type );
|
||||
|
||||
$sth = $dbh->table_info( $catalog, $schema, $table, $type );
|
||||
$sth = $dbh->table_info( $catalog, $schema, $table, $type, \%attr );
|
||||
|
||||
$dbh->func( "list_tables" );
|
||||
|
||||
Whenever an C<\%attr> argument can be specified, that C<\%attr>
object's C<sql_table_source> attribute is preferred over the C<$dbh>
attribute or the driver default, as in the sketch below.
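In this sketch, C<DBD::Foo::TableSource> stands for a hypothetical class
implementing the interface described here:

  $sth = $dbh->table_info( undef, undef, undef, "TABLE",
                           { sql_table_source => "DBD::Foo::TableSource" } );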
|
||||
|
||||
=head2 DBI::DBD::SqlEngine::DataSource
|
||||
|
||||
Provides base functionality for dealing with tables. It is primarily
|
||||
designed for allowing transparent access to files on disk or already
|
||||
opened (file-)streams (e.g. for DBD::CSV).
|
||||
|
||||
Derived classes shall be restricted to similar functionality, too (e.g.
opening streams from an archive, transparently compressing/uncompressing
log files before parsing them, etc.).
|
||||
|
||||
package DBI::DBD::SqlEngine::DataSource;
|
||||
|
||||
sub complete_table_name ($$;$)
|
||||
{
|
||||
my ( $self, $meta, $table, $respect_case ) = @_;
|
||||
...
|
||||
}
|
||||
|
||||
The method C<complete_table_name> is called when first setting up the
|
||||
I<meta information> for a table:
|
||||
|
||||
"SELECT user.id, user.name, user.shell FROM user WHERE ..."
|
||||
|
||||
results in opening the table C<user>. The first step of the table open
process is completing the name. Let's imagine you have a L<DBD::CSV>
handle with the following settings:
|
||||
|
||||
$dbh->{sql_identifier_case} = SQL_IC_LOWER;
|
||||
$dbh->{f_ext} = '.lst';
|
||||
$dbh->{f_dir} = '/data/web/adrmgr';
|
||||
|
||||
Those settings will result in looking for files matching
|
||||
C<[Uu][Ss][Ee][Rr](\.lst)?$> in C</data/web/adrmgr/>. The scanning of the
|
||||
directory C</data/web/adrmgr/> and the pattern match check will be done
|
||||
in C<DBD::File::DataSource::File> by the C<complete_table_name> method.
|
||||
|
||||
If you intend to provide other sources of data streams than files, in
|
||||
addition to provide an appropriate C<complete_table_name> method, a method
|
||||
to open the resource is required:
|
||||
|
||||
package DBI::DBD::SqlEngine::DataSource;
|
||||
|
||||
sub open_data ($)
|
||||
{
|
||||
my ( $self, $meta, $attrs, $flags ) = @_;
|
||||
...
|
||||
}
|
||||
|
||||
After the method C<open_data> has been run successfully, the table's meta
information is in a state which allows the table's data accessor methods
to fetch/store row information. Implementation details depend heavily
on the table implementation, whereby the most prominent is surely
L<DBD::File::Table|DBD::File/DBD::File::Table>.
|
||||
|
||||
=head2 DBI::DBD::SqlEngine::Statement
|
||||
|
||||
Derives from DBI::SQL::Nano::Statement for unified naming when deriving
|
||||
new drivers. No additional feature is provided from here.
|
||||
|
||||
=head2 DBI::DBD::SqlEngine::Table
|
||||
|
||||
Derives from DBI::SQL::Nano::Table for unified naming when deriving
|
||||
new drivers.
|
||||
|
||||
You should consult the documentation of C<< SQL::Eval::Table >> (see
|
||||
L<SQL::Eval>) to get more information about the abstract methods of the
|
||||
table's base class you have to override and a description of the table
|
||||
meta information expected by the SQL engines.
|
||||
|
||||
=over 4
|
||||
|
||||
=item bootstrap_table_meta
|
||||
|
||||
Initializes a table meta structure. Can be safely overridden in a
|
||||
derived class, as long as the C<< SUPER >> method is called at the end
|
||||
of the overridden method.
|
||||
|
||||
It copies the following attributes from the database handle into the table
meta data: C<< $dbh->{ReadOnly} >> into C<< $meta->{readonly} >>, as well as
C<sql_identifier_case> and C<sql_data_source>, and makes them sticky to the table.
|
||||
|
||||
This method should be called before you attempt to map between file
|
||||
name and table name to ensure the correct directory, extension etc. are
|
||||
used.
|
||||
|
||||
=item init_table_meta
|
||||
|
||||
Initializes more attributes of the table meta data - usually more
expensive ones (e.g. those which require class instantiations) - once
the file name and the table name could be mapped.
|
||||
|
||||
=item get_table_meta
|
||||
|
||||
Returns the table meta data. If there is none for the required table,
a new one is initialized. When, after bootstrapping a new I<table_meta>
and L<completing the table name|/DBI::DBD::SqlEngine::DataSource>, a
mapping can be established between an existing I<table_meta> and the
newly bootstrapped one, the existing one is used and a mapping
shortcut between the recently used table name and the already known
table name is held in C<< $dbh->{sql_meta_map} >>. When this fails,
nothing is returned. On success, the name of the table and the meta data
structure are returned.
|
||||
|
||||
=item get_table_meta_attr
|
||||
|
||||
Returns a single attribute from the table meta data. If the attribute
|
||||
name appears in C<%compat_map>, the attribute name is updated from
|
||||
there.
|
||||
|
||||
=item set_table_meta_attr
|
||||
|
||||
Sets a single attribute in the table meta data. If the attribute
|
||||
name appears in C<%compat_map>, the attribute name is updated from
|
||||
there.
|
||||
|
||||
=item table_meta_attr_changed
|
||||
|
||||
Called when an attribute of the meta data is modified.
|
||||
|
||||
If the modified attribute requires resetting a calculated attribute, the
calculated attribute is reset (deleted from the meta data structure) and
the I<initialized> flag is removed, too. The decision is made based on
|
||||
C<%register_reset_on_modify>.
|
||||
|
||||
=item register_reset_on_modify
|
||||
|
||||
Allows C<set_table_meta_attr> to reset meta attributes when special
|
||||
attributes are modified. For DBD::File, modifying one of C<f_file>, C<f_dir>,
|
||||
C<f_ext> or C<f_lockfile> will reset C<f_fqfn>. DBD::DBM extends the
|
||||
list for C<dbm_type> and C<dbm_mldbm> to reset the value of C<dbm_tietype>.
|
||||
|
||||
If your DBD has calculated values in the meta data area, then call
|
||||
C<register_reset_on_modify>:
|
||||
|
||||
my %reset_on_modify = ( "xxx_foo" => "xxx_bar" );
|
||||
__PACKAGE__->register_reset_on_modify( \%reset_on_modify );
|
||||
|
||||
=item register_compat_map
|
||||
|
||||
Allows C<get_table_meta_attr> and C<set_table_meta_attr> to update the
|
||||
attribute name to the current favored one:
|
||||
|
||||
# from DBD::DBM
|
||||
my %compat_map = ( "dbm_ext" => "f_ext" );
|
||||
__PACKAGE__->register_compat_map( \%compat_map );
|
||||
|
||||
=item open_data
|
||||
|
||||
Called to open the table's data storage. This is silently forwarded
|
||||
to C<< $meta->{sql_data_source}->open_data() >>.
|
||||
|
||||
After this is done, a derived class might add more steps in an overridden
|
||||
C<< open_file >> method.
|
||||
|
||||
=item new
|
||||
|
||||
Instantiates the table. This is done in 3 steps:
|
||||
|
||||
1. get the table meta data
|
||||
2. open the data file
|
||||
3. bless the table data structure using inherited constructor new
|
||||
|
||||
It is not recommended to override the constructor of the table class.
|
||||
Find a reasonable place to add your extensions in one of the methods
described above.
|
||||
|
||||
=back
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
The module DBI::DBD::SqlEngine is currently maintained by
|
||||
|
||||
H.Merijn Brand < h.m.brand at xs4all.nl > and
|
||||
Jens Rehsack < rehsack at googlemail.com >
|
||||
|
||||
=head1 COPYRIGHT AND LICENSE
|
||||
|
||||
Copyright (C) 2010 by H.Merijn Brand & Jens Rehsack
|
||||
|
||||
All rights reserved.
|
||||
|
||||
You may freely distribute and/or modify this module under the terms of
|
||||
either the GNU General Public License (GPL) or the Artistic License, as
|
||||
specified in the Perl README file.
|
||||
|
||||
=cut
|
|
@ -0,0 +1,333 @@
|
|||
=head1 NAME
|
||||
|
||||
DBI::DBD::SqlEngine::HowTo - Guide to creating a DBI::DBD::SqlEngine based driver
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
perldoc DBI::DBD::SqlEngine::HowTo
|
||||
perldoc DBI
|
||||
perldoc DBI::DBD
|
||||
perldoc DBI::DBD::SqlEngine::Developers
|
||||
perldoc SQL::Eval
|
||||
perldoc DBI::DBD::SqlEngine
|
||||
perldoc DBI::DBD::SqlEngine::HowTo
|
||||
perldoc SQL::Statement::Embed
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
This document provides a step-by-step guide on how to create a new
C<DBI::DBD::SqlEngine> based DBD. It expects that you have carefully read
the L<DBI> documentation, that you're familiar with L<DBI::DBD>, and that
you have read and understood L<DBD::ExampleP>.
|
||||
|
||||
This document addresses experienced developers who are really sure that
|
||||
they need to invest time when writing a new DBI Driver. Writing a DBI
|
||||
Driver is neither a weekend project nor an easy job for hobby coders
after work. Expect one or two man-months of time for the first start.
|
||||
|
||||
Those who are still reading should be able to recite the rules of
L<DBI::DBD/CREATING A NEW DRIVER>.
|
||||
|
||||
=head1 CREATING DRIVER CLASSES
|
||||
|
||||
Do you have an entry in DBI's DBD registry? DBI::DBD::SqlEngine expects
every driver class in the inheritance chain to have a unique prefix.
|
||||
|
||||
It's easy to get a prefix - just drop the DBI team a note
(L<DBI/GETTING_HELP>). If for some reason you want to hide your work,
take a look at L<Class::Method::Modifiers> to see how to wrap a private
prefix method around the existing C<driver_prefix>.
|
||||
|
||||
For this guide, a prefix of C<foo_> is assumed.
|
||||
|
||||
=head2 Sample Skeleton
|
||||
|
||||
package DBD::Foo;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
use vars qw($VERSION);
|
||||
use base qw(DBI::DBD::SqlEngine);
|
||||
|
||||
use DBI ();
|
||||
|
||||
$VERSION = "0.001";
|
||||
|
||||
package DBD::Foo::dr;
|
||||
|
||||
use vars qw(@ISA $imp_data_size);
|
||||
|
||||
@ISA = qw(DBI::DBD::SqlEngine::dr);
|
||||
$imp_data_size = 0;
|
||||
|
||||
package DBD::Foo::db;
|
||||
|
||||
use vars qw(@ISA $imp_data_size);
|
||||
|
||||
@ISA = qw(DBI::DBD::SqlEngine::db);
|
||||
$imp_data_size = 0;
|
||||
|
||||
package DBD::Foo::st;
|
||||
|
||||
use vars qw(@ISA $imp_data_size);
|
||||
|
||||
@ISA = qw(DBI::DBD::SqlEngine::st);
|
||||
$imp_data_size = 0;
|
||||
|
||||
package DBD::Foo::Statement;
|
||||
|
||||
use vars qw(@ISA);
|
||||
|
||||
@ISA = qw(DBI::DBD::SqlEngine::Statement);
|
||||
|
||||
package DBD::Foo::Table;
|
||||
|
||||
use vars qw(@ISA);
|
||||
|
||||
@ISA = qw(DBI::DBD::SqlEngine::Table);
|
||||
|
||||
1;
|
||||
|
||||
Tiny, eh? And all you have now is a DBD named foo which is able to
deal with temporary tables, as long as you use L<SQL::Statement>. In
L<DBI::SQL::Nano> environments, this DBD can do nothing.
|
||||
|
||||
=head2 Deal with own attributes
|
||||
|
||||
Before we start doing usable stuff with our DBI driver, we need to think
|
||||
about what we want to do and how we want to do it.
|
||||
|
||||
Do we need tunable knobs accessible by users? Do we need status
|
||||
information? All this is handled in attributes of the database handles (be
|
||||
careful when your DBD is running "behind" a L<DBD::Gofer> proxy).
|
||||
|
||||
How do the attributes get into the DBD and how can the user fetch them?
Good question, but you should know because you've read the L<DBI>
documentation.
|
||||
|
||||
C<DBI::DBD::SqlEngine::db::FETCH> and C<DBI::DBD::SqlEngine::db::STORE>
take care of this for you - all they need to know is which attribute
names are valid and whether they are mutable or immutable. Tell them by
adding C<init_valid_attributes> to your db class:
|
||||
|
||||
sub init_valid_attributes
|
||||
{
|
||||
my $dbh = $_[0];
|
||||
|
||||
$dbh->SUPER::init_valid_attributes ();
|
||||
|
||||
$dbh->{foo_valid_attrs} = {
|
||||
foo_version => 1, # contains version of this driver
|
||||
foo_valid_attrs => 1, # contains the valid attributes of foo drivers
|
||||
foo_readonly_attrs => 1, # contains immutable attributes of foo drivers
|
||||
foo_bar => 1, # contains the bar attribute
|
||||
foo_baz => 1, # contains the baz attribute
|
||||
foo_manager => 1, # contains the manager of the driver instance
|
||||
foo_manager_type => 1, # contains the manager class of the driver instance
|
||||
};
|
||||
$dbh->{foo_readonly_attrs} = {
|
||||
foo_version => 1, # ensure no-one modifies the driver version
|
||||
foo_valid_attrs => 1, # do not permit one to add more valid attributes ...
|
||||
foo_readonly_attrs => 1, # ... or make the immutable mutable
|
||||
foo_manager => 1, # manager is set internally only
|
||||
};
|
||||
|
||||
return $dbh;
|
||||
}
|
||||
|
||||
Woooho - but now the user cannot assign new managers? This is intended;
override C<STORE> to handle it!
|
||||
|
||||
sub STORE ($$$)
|
||||
{
|
||||
my ( $dbh, $attrib, $value ) = @_;
|
||||
|
||||
$dbh->SUPER::STORE( $attrib, $value );
|
||||
|
||||
# we're still alive, so no exception is thrown ...
|
||||
# by DBI::DBD::SqlEngine::db::STORE
|
||||
if ( $attrib eq "foo_manager_type" )
|
||||
{
|
||||
$dbh->{foo_manager} = $dbh->{foo_manager_type}->new();
|
||||
# ... probably correct some states based on the new
|
||||
# foo_manager_type - see DBD::Sys for an example
|
||||
}
|
||||
}
|
||||
|
||||
But ... my driver runs without a manager until someone first assigns
a C<foo_manager_type>. Well, no - there are two places where you can
initialize defaults:
|
||||
|
||||
sub init_default_attributes
|
||||
{
|
||||
my ($dbh, $phase) = @_;
|
||||
|
||||
$dbh->SUPER::init_default_attributes($phase);
|
||||
|
||||
if( 0 == $phase )
|
||||
{
|
||||
# init all attributes which have no knowledge about
|
||||
# user settings from DSN or the attribute hash
|
||||
$dbh->{foo_manager_type} = "DBD::Foo::Manager";
|
||||
}
|
||||
elsif( 1 == $phase )
|
||||
{
|
||||
# init phase with more knowledge from DSN or attribute
|
||||
# hash
|
||||
$dbh->{foo_manager} = $dbh->{foo_manager_type}->new();
|
||||
}
|
||||
|
||||
return $dbh;
|
||||
}
|
||||
|
||||
So far we can prevent users from using our database driver as a data
store for anything and everything. We care only about the really
important stuff - peace on earth and similar attributes. But in fact,
the driver still can't do anything. It can do less than nothing -
although at least it's no longer a dumb storage area.
|
||||
|
||||
=head2 User comfort
|
||||
|
||||
Since version C<0.05>, C<DBI::DBD::SqlEngine> consolidates all persistent
meta data of a table into a single structure stored in
C<< $dbh->{sql_meta} >>. While DBI::DBD::SqlEngine itself provides only
read-only access to this structure, modifications through the methods
described below are still allowed.
|
||||
|
||||
Primarily, DBI::DBD::SqlEngine provides access via the methods
C<new_sql_engine_meta>, C<get_sql_engine_meta>, C<get_single_table_meta>,
C<set_single_table_meta>, C<set_sql_engine_meta> and C<clear_sql_engine_meta>.
Those methods are easily accessible by the users via the C<< $dbh->func () >>
interface provided by DBI. Still, many users don't feel comfortable when
calling
|
||||
|
||||
# don't require extension for tables cars
|
||||
$dbh->func ("cars", "f_ext", ".csv", "set_sql_engine_meta");
|
||||
|
||||
DBI::DBD::SqlEngine injects a method into your driver to increase user
comfort, allowing:
|
||||
|
||||
# don't require extension for tables cars
|
||||
$dbh->foo_set_meta ("cars", "f_ext", ".csv");
|
||||
|
||||
Better, but here and there users like to do:
|
||||
|
||||
# don't require extension for tables cars
|
||||
$dbh->{foo_tables}->{cars}->{f_ext} = ".csv";
|
||||
|
||||
This interface is provided when derived DBDs define the following in
C<init_valid_attributes> (recall L</Deal with own attributes>):
|
||||
|
||||
sub init_valid_attributes
|
||||
{
|
||||
my $dbh = $_[0];
|
||||
|
||||
$dbh->SUPER::init_valid_attributes ();
|
||||
|
||||
$dbh->{foo_valid_attrs} = {
|
||||
foo_version => 1, # contains version of this driver
|
||||
foo_valid_attrs => 1, # contains the valid attributes of foo drivers
|
||||
foo_readonly_attrs => 1, # contains immutable attributes of foo drivers
|
||||
foo_bar => 1, # contains the bar attribute
|
||||
foo_baz => 1, # contains the baz attribute
|
||||
foo_manager => 1, # contains the manager of the driver instance
|
||||
foo_manager_type => 1, # contains the manager class of the driver instance
|
||||
foo_meta => 1, # contains the public interface to modify table meta attributes
|
||||
};
|
||||
$dbh->{foo_readonly_attrs} = {
|
||||
foo_version => 1, # ensure no-one modifies the driver version
|
||||
foo_valid_attrs => 1, # do not permit one to add more valid attributes ...
|
||||
foo_readonly_attrs => 1, # ... or make the immutable mutable
|
||||
foo_manager => 1, # manager is set internally only
|
||||
foo_meta => 1, # ensure public interface to modify table meta attributes are immutable
|
||||
};
|
||||
|
||||
$dbh->{foo_meta} = "foo_tables";
|
||||
|
||||
return $dbh;
|
||||
}
|
||||
|
||||
This provides a tied hash in C<< $dbh->{foo_tables} >> and a tied hash for
|
||||
each table's meta data in C<< $dbh->{foo_tables}->{$table_name} >>.
|
||||
Modifications on the table meta attributes are done using the table
|
||||
methods:
|
||||
|
||||
sub get_table_meta_attr { ... }
|
||||
sub set_table_meta_attr { ... }
|
||||
|
||||
Both methods can adjust the attribute name for compatibility reasons, e.g.
|
||||
when former versions of the DBD allowed different names to be used for the
|
||||
same flag:
|
||||
|
||||
my %compat_map = (
|
||||
abc => 'foo_abc',
|
||||
xyz => 'foo_xyz',
|
||||
);
|
||||
__PACKAGE__->register_compat_map( \%compat_map );
|
||||
|
||||
If any user modification on a meta attribute needs reinitialization of
|
||||
the meta structure (in case of C<DBI::DBD::SqlEngine> these are the attributes
|
||||
C<f_file>, C<f_dir>, C<f_ext> and C<f_lockfile>), inform DBI::DBD::SqlEngine by
|
||||
doing
|
||||
|
||||
my %reset_on_modify = (
|
||||
foo_xyz => "foo_bar",
|
||||
foo_abc => "foo_bar",
|
||||
);
|
||||
__PACKAGE__->register_reset_on_modify( \%reset_on_modify );
|
||||
|
||||
The next access to the table meta data will force DBI::DBD::SqlEngine to re-do the
|
||||
entire meta initialization process.
|
||||
|
||||
Any further action which needs to be taken can be handled in
C<table_meta_attr_changed>:
|
||||
|
||||
sub table_meta_attr_changed
|
||||
{
|
||||
my ($class, $meta, $attrib, $value) = @_;
|
||||
...
|
||||
$class->SUPER::table_meta_attr_changed ($meta, $attrib, $value);
|
||||
}
|
||||
|
||||
This is done before the new value is set in C<$meta>, so the attribute
|
||||
changed handler can act depending on the old value.
|
||||
|
||||
=head2 Dealing with Tables
|
||||
|
||||
Let's put some life into it - it's about time.
|
||||
|
||||
This is a good point for a quick side step to L<SQL::Statement::Embed>,
which will help to shorten the next paragraphs. The documentation in
SQL::Statement::Embed about embedding it in your own DBD works fine with
both SQL::Statement and DBI::SQL::Nano.
|
||||
|
||||
A second look should go to L<DBI::DBD::SqlEngine::Developers> to get a
picture of the driver part of the table API. Usually there isn't much to
do for a simple driver.
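
As an illustration only, a tiny in-memory table class could look like the
sketch below. The method names and signatures follow the C<SQL::Eval::Table>
style API mentioned above; the C<records> slot and the data handling are
made-up examples, not part of the API.

    package DBD::Foo::Table;

    use strict;
    use warnings;

    use vars qw(@ISA);
    @ISA = qw(DBI::DBD::SqlEngine::Table);

    # return the next row as an array reference, undef when done
    sub fetch_row
    {
        my ( $self, $data ) = @_;
        return $self->{row} = shift @{ $self->{records} ||= [] };
    }

    # append a row to the (purely in-memory) storage
    sub push_row
    {
        my ( $self, $data, $fields ) = @_;
        push @{ $self->{records} }, [@$fields];
        return 1;
    }

    1;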
|
||||
|
||||
=head2 Testing
|
||||
|
||||
Now you should have your own first DBD. That was easy, wasn't it? But
does it work well? Prove it by writing tests, and remember to use
C<dbd_edit_mm_attribs> from L<DBI::DBD> to ensure that even rare cases
get tested.
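
A hedged sketch of the relevant part of a F<Makefile.PL> follows; the extra
test options (C<create_pp_tests>, C<create_nano_tests>) are the ones commonly
used by file-based DBDs - check L<DBI::DBD> for the authoritative list before
relying on them:

    use ExtUtils::MakeMaker;
    use DBI::DBD;

    my %wmf = (
        NAME         => "DBD::Foo",
        VERSION_FROM => "lib/DBD/Foo.pm",
        PREREQ_PM    => { "DBI" => 0 },
    );

    # let DBI::DBD add variants of the test suite, e.g. re-running
    # t/*.t under DBI::PurePerl and DBI::SQL::Nano
    WriteMakefile(
        dbd_edit_mm_attribs(
            \%wmf, { create_pp_tests => 1, create_nano_tests => 1 }
        )
    );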
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
This guide is written by Jens Rehsack. DBI::DBD::SqlEngine is written by
|
||||
Jens Rehsack using code from DBD::File originally written by Jochen
|
||||
Wiedmann and Jeff Zucker.
|
||||
|
||||
The module DBI::DBD::SqlEngine is currently maintained by
|
||||
|
||||
H.Merijn Brand < h.m.brand at xs4all.nl > and
|
||||
Jens Rehsack < rehsack at googlemail.com >
|
||||
|
||||
=head1 COPYRIGHT AND LICENSE
|
||||
|
||||
Copyright (C) 2010 by H.Merijn Brand & Jens Rehsack
|
||||
|
||||
All rights reserved.
|
||||
|
||||
You may freely distribute and/or modify this module under the terms of
|
||||
either the GNU General Public License (GPL) or the Artistic License, as
|
||||
specified in the Perl README file.
|
||||
|
||||
=cut
|
|
@ -0,0 +1,900 @@
|
|||
package DBI::Gofer::Execute;
|
||||
|
||||
# $Id: Execute.pm 14282 2010-07-26 00:12:54Z David $
|
||||
#
|
||||
# Copyright (c) 2007, Tim Bunce, Ireland
|
||||
#
|
||||
# You may distribute under the terms of either the GNU General Public
|
||||
# License or the Artistic License, as specified in the Perl README file.
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use Carp;
|
||||
|
||||
use DBI qw(dbi_time);
|
||||
use DBI::Gofer::Request;
|
||||
use DBI::Gofer::Response;
|
||||
|
||||
use base qw(DBI::Util::_accessor);
|
||||
|
||||
our $VERSION = "0.014283";
|
||||
|
||||
our @all_dbh_methods = sort map { keys %$_ } $DBI::DBI_methods{db}, $DBI::DBI_methods{common};
|
||||
our %all_dbh_methods = map { $_ => (DBD::_::db->can($_)||undef) } @all_dbh_methods;
|
||||
|
||||
our $local_log = $ENV{DBI_GOFER_LOCAL_LOG}; # do extra logging to stderr
|
||||
|
||||
our $current_dbh; # the dbh we're using for this request
|
||||
|
||||
|
||||
# set trace for server-side gofer
|
||||
# Could use DBI_TRACE env var when it's an unrelated separate process
|
||||
# but using DBI_GOFER_TRACE makes testing easier for subprocesses (eg stream)
|
||||
DBI->trace(split /=/, $ENV{DBI_GOFER_TRACE}, 2) if $ENV{DBI_GOFER_TRACE};
|
||||
|
||||
|
||||
# define valid configuration attributes (args to new())
|
||||
# the values here indicate the basic type of values allowed
|
||||
my %configuration_attributes = (
|
||||
gofer_execute_class => 1,
|
||||
default_connect_dsn => 1,
|
||||
forced_connect_dsn => 1,
|
||||
default_connect_attributes => {},
|
||||
forced_connect_attributes => {},
|
||||
track_recent => 1,
|
||||
check_request_sub => sub {},
|
||||
check_response_sub => sub {},
|
||||
forced_single_resultset => 1,
|
||||
max_cached_dbh_per_drh => 1,
|
||||
max_cached_sth_per_dbh => 1,
|
||||
forced_response_attributes => {},
|
||||
forced_gofer_random => 1,
|
||||
stats => {},
|
||||
);
|
||||
|
||||
__PACKAGE__->mk_accessors(
|
||||
keys %configuration_attributes
|
||||
);
|
||||
|
||||
|
||||
|
||||
sub new {
|
||||
my ($self, $args) = @_;
|
||||
$args->{default_connect_attributes} ||= {};
|
||||
$args->{forced_connect_attributes} ||= {};
|
||||
$args->{max_cached_sth_per_dbh} ||= 1000;
|
||||
$args->{stats} ||= {};
|
||||
return $self->SUPER::new($args);
|
||||
}
|
||||
|
||||
|
||||
sub valid_configuration_attributes {
|
||||
my $self = shift;
|
||||
return { %configuration_attributes };
|
||||
}
|
||||
|
||||
|
||||
my %extra_attr = (
|
||||
# Only referenced if the driver doesn't support private_attribute_info method.
|
||||
# What driver-specific attributes should be returned for the driver being used?
|
||||
# keyed by $dbh->{Driver}{Name}
|
||||
# XXX for sth should split into attr specific to resultsets (where NUM_OF_FIELDS > 0) and others
|
||||
# which would reduce processing/traffic for non-select statements
|
||||
mysql => {
|
||||
dbh => [qw(
|
||||
mysql_errno mysql_error mysql_hostinfo mysql_info mysql_insertid
|
||||
mysql_protoinfo mysql_serverinfo mysql_stat mysql_thread_id
|
||||
)],
|
||||
sth => [qw(
|
||||
mysql_is_blob mysql_is_key mysql_is_num mysql_is_pri_key mysql_is_auto_increment
|
||||
mysql_length mysql_max_length mysql_table mysql_type mysql_type_name mysql_insertid
|
||||
)],
|
||||
# XXX this dbh_after_sth stuff is a temporary, but important, hack.
|
||||
# should be done via hash instead of arrays where the hash value contains
|
||||
# flags that can indicate which attributes need to be handled in this way
|
||||
dbh_after_sth => [qw(
|
||||
mysql_insertid
|
||||
)],
|
||||
},
|
||||
Pg => {
|
||||
dbh => [qw(
|
||||
pg_protocol pg_lib_version pg_server_version
|
||||
pg_db pg_host pg_port pg_default_port
|
||||
pg_options pg_pid
|
||||
)],
|
||||
sth => [qw(
|
||||
pg_size pg_type pg_oid_status pg_cmd_status
|
||||
)],
|
||||
},
|
||||
Sybase => {
|
||||
dbh => [qw(
|
||||
syb_dynamic_supported syb_oc_version syb_server_version syb_server_version_string
|
||||
)],
|
||||
sth => [qw(
|
||||
syb_types syb_proc_status syb_result_type
|
||||
)],
|
||||
},
|
||||
SQLite => {
|
||||
dbh => [qw(
|
||||
sqlite_version
|
||||
)],
|
||||
sth => [qw(
|
||||
)],
|
||||
},
|
||||
ExampleP => {
|
||||
dbh => [qw(
|
||||
examplep_private_dbh_attrib
|
||||
)],
|
||||
sth => [qw(
|
||||
examplep_private_sth_attrib
|
||||
)],
|
||||
dbh_after_sth => [qw(
|
||||
examplep_insertid
|
||||
)],
|
||||
},
|
||||
);
|
||||
|
||||
|
||||
sub _connect {
|
||||
my ($self, $request) = @_;
|
||||
|
||||
my $stats = $self->{stats};
|
||||
|
||||
# discard CachedKids from time to time
|
||||
if (++$stats->{_requests_served} % 1000 == 0 # XXX config?
|
||||
and my $max_cached_dbh_per_drh = $self->{max_cached_dbh_per_drh}
|
||||
) {
|
||||
my %drivers = DBI->installed_drivers();
|
||||
while ( my ($driver, $drh) = each %drivers ) {
|
||||
next unless my $CK = $drh->{CachedKids};
|
||||
next unless keys %$CK > $max_cached_dbh_per_drh;
|
||||
next if $driver eq 'Gofer'; # ie transport=null when testing
|
||||
DBI->trace_msg(sprintf "Clearing %d cached dbh from $driver",
|
||||
scalar keys %$CK, $self->{max_cached_dbh_per_drh});
|
||||
$_->{Active} && $_->disconnect for values %$CK;
|
||||
%$CK = ();
|
||||
}
|
||||
}
|
||||
|
||||
# local $ENV{...} can leak, so only do it if required
|
||||
local $ENV{DBI_AUTOPROXY} if $ENV{DBI_AUTOPROXY};
|
||||
|
||||
my ($connect_method, $dsn, $username, $password, $attr) = @{ $request->dbh_connect_call };
|
||||
$connect_method ||= 'connect_cached';
|
||||
$stats->{method_calls_dbh}->{$connect_method}++;
|
||||
|
||||
# delete attributes we don't want to affect the server-side
|
||||
# (Could just do this on client-side and trust the client. DoS?)
|
||||
delete @{$attr}{qw(Profile InactiveDestroy AutoInactiveDestroy HandleError HandleSetErr TraceLevel Taint TaintIn TaintOut)};
|
||||
|
||||
$dsn = $self->forced_connect_dsn || $dsn || $self->default_connect_dsn
|
||||
or die "No forced_connect_dsn, requested dsn, or default_connect_dsn for request";
|
||||
|
||||
my $random = $self->{forced_gofer_random} || $ENV{DBI_GOFER_RANDOM} || '';
|
||||
|
||||
my $connect_attr = {
|
||||
|
||||
# the configured default attributes, if any
|
||||
%{ $self->default_connect_attributes },
|
||||
|
||||
# pass username and password as attributes
|
||||
# then they can be overridden by forced_connect_attributes
|
||||
Username => $username,
|
||||
Password => $password,
|
||||
|
||||
# the requested attributes
|
||||
%$attr,
|
||||
|
||||
# force some attributes the way we'd like them
|
||||
PrintWarn => $local_log,
|
||||
PrintError => $local_log,
|
||||
|
||||
# the configured default attributes, if any
|
||||
%{ $self->forced_connect_attributes },
|
||||
|
||||
# RaiseError must be enabled
|
||||
RaiseError => 1,
|
||||
|
||||
# reset Executed flag (of the cached handle) so we can use it to tell
|
||||
# if errors happened before the main part of the request was executed
|
||||
Executed => 0,
|
||||
|
||||
# ensure this connect_cached doesn't have the same args as the client
|
||||
# because that causes subtle issues if in the same process (ie transport=null)
|
||||
# include pid to avoid problems with forking (ie null transport in mod_perl)
|
||||
# include gofer-random to avoid random behaviour leaking to other handles
|
||||
dbi_go_execute_unique => join("|", __PACKAGE__, $$, $random),
|
||||
};
|
||||
|
||||
# XXX implement our own private connect_cached method? (with rate-limited ping)
|
||||
my $dbh = DBI->$connect_method($dsn, undef, undef, $connect_attr);
|
||||
|
||||
$dbh->{ShowErrorStatement} = 1 if $local_log;
|
||||
|
||||
# XXX should probably just be a Callbacks => arg to connect_cached
|
||||
# with a cache of pre-built callback hooks (memoized, without $self)
|
||||
if (my $random = $self->{forced_gofer_random} || $ENV{DBI_GOFER_RANDOM}) {
|
||||
$self->_install_rand_callbacks($dbh, $random);
|
||||
}
|
||||
|
||||
my $CK = $dbh->{CachedKids};
|
||||
if ($CK && keys %$CK > $self->{max_cached_sth_per_dbh}) {
|
||||
%$CK = (); # clear all statement handles
|
||||
}
|
||||
|
||||
#$dbh->trace(0);
|
||||
$current_dbh = $dbh;
|
||||
return $dbh;
|
||||
}
|
||||
|
||||
|
||||
sub reset_dbh {
|
||||
my ($self, $dbh) = @_;
|
||||
$dbh->set_err(undef, undef); # clear any error state
|
||||
}
|
||||
|
||||
|
||||
sub new_response_with_err {
|
||||
my ($self, $rv, $eval_error, $dbh) = @_;
|
||||
# this is the usual way to create a response for both success and failure
|
||||
# capture err+errstr etc and merge in $eval_error ($@)
|
||||
|
||||
my ($err, $errstr, $state) = ($DBI::err, $DBI::errstr, $DBI::state);
|
||||
|
||||
if ($eval_error) {
|
||||
$err ||= $DBI::stderr || 1; # ensure err is true
|
||||
if ($errstr) {
|
||||
$eval_error =~ s/(?: : \s)? \Q$errstr//x if $errstr;
|
||||
chomp $errstr;
|
||||
$errstr .= "; $eval_error";
|
||||
}
|
||||
else {
|
||||
$errstr = $eval_error;
|
||||
}
|
||||
}
|
||||
chomp $errstr if $errstr;
|
||||
|
||||
my $flags;
|
||||
# (XXX if we ever add transaction support then we'll need to take extra
|
||||
# steps because the commit/rollback would reset Executed before we get here)
|
||||
$flags |= GOf_RESPONSE_EXECUTED if $dbh && $dbh->{Executed};
|
||||
|
||||
my $response = DBI::Gofer::Response->new({
|
||||
rv => $rv,
|
||||
err => $err,
|
||||
errstr => $errstr,
|
||||
state => $state,
|
||||
flags => $flags,
|
||||
});
|
||||
|
||||
return $response;
|
||||
}
|
||||
|
||||
|
||||
sub execute_request {
|
||||
my ($self, $request) = @_;
|
||||
# should never throw an exception
|
||||
|
||||
DBI->trace_msg("-----> execute_request\n");
|
||||
|
||||
my @warnings;
|
||||
local $SIG{__WARN__} = sub {
|
||||
push @warnings, @_;
|
||||
warn @_ if $local_log;
|
||||
};
|
||||
|
||||
my $response = eval {
|
||||
|
||||
if (my $check_request_sub = $self->check_request_sub) {
|
||||
$request = $check_request_sub->($request, $self)
|
||||
or die "check_request_sub failed";
|
||||
}
|
||||
|
||||
my $version = $request->version || 0;
|
||||
die ref($request)." version $version is not supported"
|
||||
if $version < 0.009116 or $version >= 1;
|
||||
|
||||
($request->is_sth_request)
|
||||
? $self->execute_sth_request($request)
|
||||
: $self->execute_dbh_request($request);
|
||||
};
|
||||
$response ||= $self->new_response_with_err(undef, $@, $current_dbh);
|
||||
|
||||
if (my $check_response_sub = $self->check_response_sub) {
|
||||
# not protected with an eval so it can choose to throw an exception
|
||||
my $new = $check_response_sub->($response, $self, $request);
|
||||
$response = $new if ref $new;
|
||||
}
|
||||
|
||||
undef $current_dbh;
|
||||
|
||||
$response->warnings(\@warnings) if @warnings;
|
||||
DBI->trace_msg("<----- execute_request\n");
|
||||
return $response;
|
||||
}
|
||||
|
||||
|
||||
sub execute_dbh_request {
|
||||
my ($self, $request) = @_;
|
||||
my $stats = $self->{stats};
|
||||
|
||||
my $dbh;
|
||||
my $rv_ref = eval {
|
||||
$dbh = $self->_connect($request);
|
||||
my $args = $request->dbh_method_call; # [ wantarray, 'method_name', @args ]
|
||||
my $wantarray = shift @$args;
|
||||
my $meth = shift @$args;
|
||||
$stats->{method_calls_dbh}->{$meth}++;
|
||||
my @rv = ($wantarray)
|
||||
? $dbh->$meth(@$args)
|
||||
: scalar $dbh->$meth(@$args);
|
||||
\@rv;
|
||||
} || [];
|
||||
my $response = $self->new_response_with_err($rv_ref, $@, $dbh);
|
||||
|
||||
return $response if not $dbh;
|
||||
|
||||
# does this request also want any dbh attributes returned?
|
||||
if (my $dbh_attributes = $request->dbh_attributes) {
|
||||
$response->dbh_attributes( $self->gather_dbh_attributes($dbh, $dbh_attributes) );
|
||||
}
|
||||
|
||||
if ($rv_ref and my $lid_args = $request->dbh_last_insert_id_args) {
|
||||
$stats->{method_calls_dbh}->{last_insert_id}++;
|
||||
my $id = $dbh->last_insert_id( @$lid_args );
|
||||
$response->last_insert_id( $id );
|
||||
}
|
||||
|
||||
if ($rv_ref and UNIVERSAL::isa($rv_ref->[0],'DBI::st')) {
|
||||
# dbh_method_call was probably a metadata method like table_info
|
||||
# that returns a statement handle, so turn the $sth into resultset
|
||||
my $sth = $rv_ref->[0];
|
||||
$response->sth_resultsets( $self->gather_sth_resultsets($sth, $request, $response) );
|
||||
$response->rv("(sth)"); # don't try to return actual sth
|
||||
}
|
||||
|
||||
# we're finished with this dbh for this request
|
||||
$self->reset_dbh($dbh);
|
||||
|
||||
return $response;
|
||||
}
|
||||
|
||||
|
||||
sub gather_dbh_attributes {
|
||||
my ($self, $dbh, $dbh_attributes) = @_;
|
||||
my @req_attr_names = @$dbh_attributes;
|
||||
if ($req_attr_names[0] eq '*') { # auto include std + private
|
||||
shift @req_attr_names;
|
||||
push @req_attr_names, @{ $self->_std_response_attribute_names($dbh) };
|
||||
}
|
||||
my %dbh_attr_values;
|
||||
@dbh_attr_values{@req_attr_names} = $dbh->FETCH_many(@req_attr_names);
|
||||
|
||||
# XXX piggyback installed_methods onto dbh_attributes for now
|
||||
$dbh_attr_values{dbi_installed_methods} = { DBI->installed_methods };
|
||||
|
||||
# XXX piggyback default_methods onto dbh_attributes for now
|
||||
$dbh_attr_values{dbi_default_methods} = _get_default_methods($dbh);
|
||||
|
||||
return \%dbh_attr_values;
|
||||
}
|
||||
|
||||
|
||||
sub _std_response_attribute_names {
|
||||
my ($self, $h) = @_;
|
||||
$h = tied(%$h) || $h; # switch to inner handle
|
||||
|
||||
# cache the private_attribute_info data for each handle
|
||||
# XXX might be better to cache it in the executor
|
||||
# as it's unlikely to change
|
||||
# or perhaps at least cache it in the dbh even for sth
|
||||
# as the sth are typically very short lived
|
||||
|
||||
my ($dbh, $h_type, $driver_name, @attr_names);
|
||||
|
||||
if ($dbh = $h->{Database}) { # is an sth
|
||||
|
||||
# does the dbh already have the answer cached?
|
||||
return $dbh->{private_gofer_std_attr_names_sth} if $dbh->{private_gofer_std_attr_names_sth};
|
||||
|
||||
($h_type, $driver_name) = ('sth', $dbh->{Driver}{Name});
|
||||
push @attr_names, qw(NUM_OF_PARAMS NUM_OF_FIELDS NAME TYPE NULLABLE PRECISION SCALE);
|
||||
}
|
||||
else { # is a dbh
|
||||
return $h->{private_gofer_std_attr_names_dbh} if $h->{private_gofer_std_attr_names_dbh};
|
||||
|
||||
($h_type, $driver_name, $dbh) = ('dbh', $h->{Driver}{Name}, $h);
|
||||
# explicitly add these because drivers may have different defaults
|
||||
# add Name so the client gets the real Name of the connection
|
||||
push @attr_names, qw(ChopBlanks LongReadLen LongTruncOk ReadOnly Name);
|
||||
}
|
||||
|
||||
if (my $pai = $h->private_attribute_info) {
|
||||
push @attr_names, keys %$pai;
|
||||
}
|
||||
else {
|
||||
push @attr_names, @{ $extra_attr{ $driver_name }{$h_type} || []};
|
||||
}
|
||||
if (my $fra = $self->{forced_response_attributes}) {
|
||||
push @attr_names, @{ $fra->{ $driver_name }{$h_type} || []}
|
||||
}
|
||||
$dbh->trace_msg("_std_response_attribute_names for $driver_name $h_type: @attr_names\n");
|
||||
|
||||
# cache into the dbh even for sth, as the dbh is usually longer lived
|
||||
return $dbh->{"private_gofer_std_attr_names_$h_type"} = \@attr_names;
|
||||
}
|
||||
|
||||
|
||||
sub execute_sth_request {
|
||||
my ($self, $request) = @_;
|
||||
my $dbh;
|
||||
my $sth;
|
||||
my $last_insert_id;
|
||||
my $stats = $self->{stats};
|
||||
|
||||
my $rv = eval {
|
||||
$dbh = $self->_connect($request);
|
||||
|
||||
my $args = $request->dbh_method_call; # [ wantarray, 'method_name', @args ]
|
||||
shift @$args; # discard wantarray
|
||||
my $meth = shift @$args;
|
||||
$stats->{method_calls_sth}->{$meth}++;
|
||||
$sth = $dbh->$meth(@$args);
|
||||
my $last = '(sth)'; # a true value (don't try to return actual sth)
|
||||
|
||||
# execute methods on the sth, e.g., bind_param & execute
|
||||
if (my $calls = $request->sth_method_calls) {
|
||||
for my $meth_call (@$calls) {
|
||||
my $method = shift @$meth_call;
|
||||
$stats->{method_calls_sth}->{$method}++;
|
||||
$last = $sth->$method(@$meth_call);
|
||||
}
|
||||
}
|
||||
|
||||
if (my $lid_args = $request->dbh_last_insert_id_args) {
|
||||
$stats->{method_calls_sth}->{last_insert_id}++;
|
||||
$last_insert_id = $dbh->last_insert_id( @$lid_args );
|
||||
}
|
||||
|
||||
$last;
|
||||
};
|
||||
my $response = $self->new_response_with_err($rv, $@, $dbh);
|
||||
|
||||
return $response if not $dbh;
|
||||
|
||||
$response->last_insert_id( $last_insert_id )
|
||||
if defined $last_insert_id;
|
||||
|
||||
# even if the eval failed we still want to try to gather attribute values
|
||||
# (XXX would be nice to be able to support streaming of results.
|
||||
# which would reduce memory usage and latency for large results)
|
||||
if ($sth) {
|
||||
$response->sth_resultsets( $self->gather_sth_resultsets($sth, $request, $response) );
|
||||
$sth->finish;
|
||||
}
|
||||
|
||||
# does this request also want any dbh attributes returned?
|
||||
my $dbh_attr_set;
|
||||
if (my $dbh_attributes = $request->dbh_attributes) {
|
||||
$dbh_attr_set = $self->gather_dbh_attributes($dbh, $dbh_attributes);
|
||||
}
|
||||
# XXX needs to be integrated with private_attribute_info() etc
|
||||
if (my $dbh_attr = $extra_attr{$dbh->{Driver}{Name}}{dbh_after_sth}) {
|
||||
@{$dbh_attr_set}{@$dbh_attr} = $dbh->FETCH_many(@$dbh_attr);
|
||||
}
|
||||
$response->dbh_attributes($dbh_attr_set) if $dbh_attr_set && %$dbh_attr_set;
|
||||
|
||||
$self->reset_dbh($dbh);
|
||||
|
||||
return $response;
|
||||
}
|
||||
|
||||
|
||||
sub gather_sth_resultsets {
|
||||
my ($self, $sth, $request, $response) = @_;
|
||||
my $resultsets = eval {
|
||||
|
||||
my $attr_names = $self->_std_response_attribute_names($sth);
|
||||
my $sth_attr = {};
|
||||
$sth_attr->{$_} = 1 for @$attr_names;
|
||||
|
||||
# let the client add/remove sth attributes
|
||||
if (my $sth_result_attr = $request->sth_result_attr) {
|
||||
$sth_attr->{$_} = $sth_result_attr->{$_}
|
||||
for keys %$sth_result_attr;
|
||||
}
|
||||
my @sth_attr = grep { $sth_attr->{$_} } keys %$sth_attr;
|
||||
|
||||
my $row_count = 0;
|
||||
my $rs_list = [];
|
||||
while (1) {
|
||||
my $rs = $self->fetch_result_set($sth, \@sth_attr);
|
||||
push @$rs_list, $rs;
|
||||
if (my $rows = $rs->{rowset}) {
|
||||
$row_count += @$rows;
|
||||
}
|
||||
last if $self->{forced_single_resultset};
|
||||
last if !($sth->more_results || $sth->{syb_more_results});
|
||||
}
|
||||
|
||||
my $stats = $self->{stats};
|
||||
$stats->{rows_returned_total} += $row_count;
|
||||
$stats->{rows_returned_max} = $row_count
|
||||
if $row_count > ($stats->{rows_returned_max}||0);
|
||||
|
||||
$rs_list;
|
||||
};
|
||||
$response->add_err(1, $@) if $@;
|
||||
return $resultsets;
|
||||
}
|
||||
|
||||
|
||||
sub fetch_result_set {
|
||||
my ($self, $sth, $sth_attr) = @_;
|
||||
my %meta;
|
||||
eval {
|
||||
@meta{ @$sth_attr } = $sth->FETCH_many(@$sth_attr);
|
||||
# we assume @$sth_attr contains NUM_OF_FIELDS
|
||||
$meta{rowset} = $sth->fetchall_arrayref()
|
||||
if (($meta{NUM_OF_FIELDS}||0) > 0); # is SELECT
|
||||
# the fetchall_arrayref may fail with a 'not executed' kind of error
|
||||
# because gather_sth_resultsets/fetch_result_set are called even if
|
||||
# execute() failed, or even if there was no execute() call at all.
|
||||
# The corresponding error goes into the resultset err, not the top-level
|
||||
# response err, so in most cases this resultset err is never noticed.
|
||||
};
|
||||
if ($@) {
|
||||
chomp $@;
|
||||
$meta{err} = $DBI::err || 1;
|
||||
$meta{errstr} = $DBI::errstr || $@;
|
||||
$meta{state} = $DBI::state;
|
||||
}
|
||||
return \%meta;
|
||||
}
|
||||
|
||||
|
||||
sub _get_default_methods {
|
||||
my ($dbh) = @_;
|
||||
# returns a ref to a hash of dbh method names for methods which the driver
|
||||
# hasn't overridden i.e., quote(). These don't need to be forwarded via gofer.
|
||||
my $ImplementorClass = $dbh->{ImplementorClass} or die;
|
||||
my %default_methods;
|
||||
for my $method (@all_dbh_methods) {
|
||||
my $dbi_sub = $all_dbh_methods{$method} || 42;
|
||||
my $imp_sub = $ImplementorClass->can($method) || 42;
|
||||
next if $imp_sub != $dbi_sub;
|
||||
#warn("default $method\n");
|
||||
$default_methods{$method} = 1;
|
||||
}
|
||||
return \%default_methods;
|
||||
}
|
||||
|
||||
|
||||
# XXX would be nice to make this a generic DBI module
|
||||
sub _install_rand_callbacks {
|
||||
my ($self, $dbh, $dbi_gofer_random) = @_;
|
||||
|
||||
my $callbacks = $dbh->{Callbacks} || {};
|
||||
my $prev = $dbh->{private_gofer_rand_fail_callbacks} || {};
|
||||
|
||||
# return if we've already setup this handle with callbacks for these specs
|
||||
return if (($callbacks->{_dbi_gofer_random_spec}||'') eq $dbi_gofer_random);
|
||||
#warn "$dbh # $callbacks->{_dbi_gofer_random_spec}";
|
||||
$callbacks->{_dbi_gofer_random_spec} = $dbi_gofer_random;
|
||||
|
||||
my ($fail_percent, $fail_err, $delay_percent, $delay_duration, %spec_part, @spec_note);
|
||||
my @specs = split /,/, $dbi_gofer_random;
|
||||
for my $spec (@specs) {
|
||||
if ($spec =~ m/^fail=(-?[.\d]+)%?$/) {
|
||||
$fail_percent = $1;
|
||||
$spec_part{fail} = $spec;
|
||||
next;
|
||||
}
|
||||
if ($spec =~ m/^err=(-?\d+)$/) {
|
||||
$fail_err = $1;
|
||||
$spec_part{err} = $spec;
|
||||
next;
|
||||
}
|
||||
if ($spec =~ m/^delay([.\d]+)=(-?[.\d]+)%?$/) {
|
||||
$delay_duration = $1;
|
||||
$delay_percent = $2;
|
||||
$spec_part{delay} = $spec;
|
||||
next;
|
||||
}
|
||||
elsif ($spec !~ m/^(\w+|\*)$/) {
|
||||
warn "Ignored DBI_GOFER_RANDOM item '$spec' which isn't a config or a dbh method name";
|
||||
next;
|
||||
}
|
||||
|
||||
my $method = $spec;
|
||||
if ($callbacks->{$method} && $prev->{$method} && $callbacks->{$method} != $prev->{$method}) {
|
||||
warn "Callback for $method method already installed so DBI_GOFER_RANDOM callback not installed\n";
|
||||
next;
|
||||
}
|
||||
unless (defined $fail_percent or defined $delay_percent) {
|
||||
warn "Ignored DBI_GOFER_RANDOM item '$spec' because not preceded by 'fail=N' and/or 'delayN=N'";
|
||||
next;
|
||||
}
|
||||
|
||||
push @spec_note, join(",", values(%spec_part), $method);
|
||||
$callbacks->{$method} = $self->_mk_rand_callback($method, $fail_percent, $delay_percent, $delay_duration, $fail_err);
|
||||
}
|
||||
warn "DBI_GOFER_RANDOM failures/delays enabled: @spec_note\n"
|
||||
if @spec_note;
|
||||
$dbh->{Callbacks} = $callbacks;
|
||||
$dbh->{private_gofer_rand_fail_callbacks} = $callbacks;
|
||||
}
|
||||
|
||||
my %_mk_rand_callback_seqn;
|
||||
|
||||
sub _mk_rand_callback {
|
||||
my ($self, $method, $fail_percent, $delay_percent, $delay_duration, $fail_err) = @_;
|
||||
my ($fail_modrate, $delay_modrate);
|
||||
$fail_percent ||= 0; $fail_modrate = int(1/(-$fail_percent )*100) if $fail_percent;
|
||||
$delay_percent ||= 0; $delay_modrate = int(1/(-$delay_percent)*100) if $delay_percent;
|
||||
# note that $method may be "*" but that's not recommended or documented or wise
|
||||
return sub {
|
||||
my ($h) = @_;
|
||||
my $seqn = ++$_mk_rand_callback_seqn{$method};
|
||||
my $delay = ($delay_percent > 0) ? rand(100) < $delay_percent :
|
||||
($delay_percent < 0) ? !($seqn % $delay_modrate): 0;
|
||||
my $fail = ($fail_percent > 0) ? rand(100) < $fail_percent :
|
||||
($fail_percent < 0) ? !($seqn % $fail_modrate) : 0;
|
||||
#no warnings 'uninitialized';
|
||||
#warn "_mk_rand_callback($fail_percent:$fail_modrate, $delay_percent:$delay_modrate): seqn=$seqn fail=$fail delay=$delay";
|
||||
if ($delay) {
|
||||
my $msg = "DBI_GOFER_RANDOM delaying execution of $method() by $delay_duration seconds\n";
|
||||
# Note what's happening in a trace message. If the delay percent is an even
|
||||
# number then use warn() instead so it's sent back to the client.
|
||||
($delay_percent % 2 == 1) ? warn($msg) : $h->trace_msg($msg);
|
||||
select undef, undef, undef, $delay_duration; # allows floating point value
|
||||
}
|
||||
if ($fail) {
|
||||
undef $_; # tell DBI to not call the method
|
||||
# the "induced by DBI_GOFER_RANDOM" is special and must be included in errstr
|
||||
# as it's checked for in a few places, such as the gofer retry logic
|
||||
return $h->set_err($fail_err || $DBI::stderr,
|
||||
"fake error from $method method induced by DBI_GOFER_RANDOM env var ($fail_percent%)");
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
sub update_stats {
|
||||
my ($self,
|
||||
$request, $response,
|
||||
$frozen_request, $frozen_response,
|
||||
$time_received,
|
||||
$store_meta, $other_meta,
|
||||
) = @_;
|
||||
|
||||
# should always have a response object here
|
||||
carp("No response object provided") unless $request;
|
||||
|
||||
my $stats = $self->{stats};
|
||||
$stats->{frozen_request_max_bytes} = length($frozen_request)
|
||||
if $frozen_request
|
||||
&& length($frozen_request) > ($stats->{frozen_request_max_bytes}||0);
|
||||
$stats->{frozen_response_max_bytes} = length($frozen_response)
|
||||
if $frozen_response
|
||||
&& length($frozen_response) > ($stats->{frozen_response_max_bytes}||0);
|
||||
|
||||
my $recent;
|
||||
if (my $track_recent = $self->{track_recent}) {
|
||||
$recent = {
|
||||
request => $frozen_request,
|
||||
response => $frozen_response,
|
||||
time_received => $time_received,
|
||||
duration => dbi_time()-$time_received,
|
||||
# for any other info
|
||||
($store_meta) ? (meta => $store_meta) : (),
|
||||
};
|
||||
$recent->{request_object} = $request
|
||||
if !$frozen_request && $request;
|
||||
$recent->{response_object} = $response
|
||||
if !$frozen_response;
|
||||
my @queues = ($stats->{recent_requests} ||= []);
|
||||
push @queues, ($stats->{recent_errors} ||= [])
|
||||
if !$response or $response->err;
|
||||
for my $queue (@queues) {
|
||||
push @$queue, $recent;
|
||||
shift @$queue if @$queue > $track_recent;
|
||||
}
|
||||
}
|
||||
return $recent;
|
||||
}
|
||||
|
||||
|
||||
1;
|
||||
__END__
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBI::Gofer::Execute - Executes Gofer requests and returns Gofer responses
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
$executor = DBI::Gofer::Execute->new( { ...config... });
|
||||
|
||||
$response = $executor->execute_request( $request );
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
Accepts a DBI::Gofer::Request object, executes the requested DBI method calls,
|
||||
and returns a DBI::Gofer::Response object.
|
||||
|
||||
Any error, including any internal 'fatal' errors, is caught and converted into
a DBI::Gofer::Response object.
|
||||
|
||||
This module is usually invoked by a 'server-side' Gofer transport module.
|
||||
They usually have names in the "C<DBI::Gofer::Transport::*>" namespace.
|
||||
Examples include: L<DBI::Gofer::Transport::stream> and L<DBI::Gofer::Transport::mod_perl>.
|
||||
|
||||
=head1 CONFIGURATION
|
||||
|
||||
=head2 check_request_sub
|
||||
|
||||
If defined, it must be a reference to a subroutine that will 'check' the request.
|
||||
It is passed the request object and the executor as its only arguments.
|
||||
|
||||
The subroutine can either return the original request object or die with a
|
||||
suitable error message (which will be turned into a Gofer response).
|
||||
|
||||
It can also construct and return a new request that should be executed instead
|
||||
of the original request.
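
For example, a minimal sketch (the DSN check is only an illustrative policy;
the calling convention - request object and executor in, request object or
exception out - is the part taken from this documentation):

    my $executor = DBI::Gofer::Execute->new({
        check_request_sub => sub {
            my ($request, $executor) = @_;
            # dbh_connect_call holds [ $method, $dsn, $user, $pass, $attr ]
            my $dsn = $request->dbh_connect_call->[1] || '';
            die "connections to that DSN are not permitted\n"
                if $dsn =~ /^dbi:ForbiddenDriver\b/i;   # hypothetical rule
            return $request;
        },
    });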
|
||||
|
||||
=head2 check_response_sub
|
||||
|
||||
If defined, it must be a reference to a subroutine that will 'check' the response.
|
||||
It is passed the response object, the executor, and the request object.
|
||||
The sub may alter the response object and return undef, or return a new response object.
|
||||
|
||||
This mechanism can be used to, for example, terminate the service if specific
|
||||
database errors are seen.
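
A minimal sketch (the error pattern and the exit policy are assumptions used
for illustration only):

    my $executor = DBI::Gofer::Execute->new({
        check_response_sub => sub {
            my ($response, $executor, $request) = @_;
            # stop this worker if the database seems fatally broken
            exit 1 if ($response->errstr || '') =~ /server has gone away/i;
            return undef;   # keep the original response object
        },
    });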
|
||||
|
||||
=head2 forced_connect_dsn
|
||||
|
||||
If set, this DSN is always used instead of the one in the request.
|
||||
|
||||
=head2 default_connect_dsn
|
||||
|
||||
If set, this DSN is used if C<forced_connect_dsn> is not set and the request does not contain a DSN itself.
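
A sketch of how these two settings relate (the DSNs are placeholders, not
recommendations):

    my $executor = DBI::Gofer::Execute->new({
        # used only when the request carries no DSN of its own
        default_connect_dsn => "dbi:SQLite:dbname=example.db",
        # would override the DSN in every request if uncommented
        # forced_connect_dsn => "dbi:SQLite:dbname=forced.db",
    });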
|
||||
|
||||
=head2 forced_connect_attributes
|
||||
|
||||
A reference to a hash of connect() attributes. Individual attributes in
|
||||
C<forced_connect_attributes> will take precedence over corresponding attributes
|
||||
in the request.
|
||||
|
||||
=head2 default_connect_attributes
|
||||
|
||||
A reference to a hash of connect() attributes. Individual attributes in the
|
||||
request take precedence over corresponding attributes in C<default_connect_attributes>.
|
||||
|
||||
=head2 max_cached_dbh_per_drh
|
||||
|
||||
If set, the loaded drivers will be checked to ensure they don't have more than
|
||||
this number of cached connections. There is no default value. This limit is not
|
||||
enforced for every request.
|
||||
|
||||
=head2 max_cached_sth_per_dbh
|
||||
|
||||
If set, all the cached statement handles will be cleared once the number of
|
||||
cached statement handles rises above this limit. The default is 1000.
|
||||
|
||||
=head2 forced_single_resultset
|
||||
|
||||
If true, then only the first result set will be fetched and returned in the response.
|
||||
|
||||
=head2 forced_response_attributes
|
||||
|
||||
A reference to a data structure that can specify extra attributes to be returned in responses.
|
||||
|
||||
forced_response_attributes => {
|
||||
DriverName => {
|
||||
dbh => [ qw(dbh_attrib_name) ],
|
||||
sth => [ qw(sth_attrib_name) ],
|
||||
},
|
||||
},
|
||||
|
||||
This can be useful in cases where the driver has not implemented the
|
||||
private_attribute_info() method and DBI::Gofer::Execute's own fallback list of
|
||||
private attributes doesn't include the driver or attributes you need.
|
||||
|
||||
=head2 track_recent
|
||||
|
||||
If set, specifies the number of recent requests and responses that should be
|
||||
kept by the update_stats() method for diagnostics. See L<DBI::Gofer::Transport::mod_perl>.
|
||||
|
||||
Note that this setting can significantly increase memory use. Use with caution.
|
||||
|
||||
=head2 forced_gofer_random
|
||||
|
||||
Enable forced random failures and/or delays for testing. See L</DBI_GOFER_RANDOM> below.
|
||||
|
||||
=head1 DRIVER-SPECIFIC ISSUES
|
||||
|
||||
Gofer needs to know about any driver-private attributes that should have their
|
||||
values sent back to the client.
|
||||
|
||||
If the driver doesn't support the private_attribute_info() method, and very
few do, then the module falls back to using some hard-coded details, if
available, for the driver being used. Currently hard-coded details are
available for the mysql, Pg, Sybase, and SQLite drivers.
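
If you maintain a driver, the cleaner alternative to these hard-coded lists
is to implement private_attribute_info() in the handle classes. A minimal
sketch (the attribute name is hypothetical):

    package DBD::Foo::db;   # likewise for DBD::Foo::st

    sub private_attribute_info {
        return { foo_some_attrib => undef };
    }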
|
||||
|
||||
=head1 TESTING
|
||||
|
||||
DBD::Gofer, DBI::Gofer::Execute and related packages are well tested by executing the
DBI test suite with DBI_AUTOPROXY configured to route all DBI calls via DBD::Gofer.
|
||||
|
||||
Because Gofer includes timeout and 'retry on error' mechanisms there is a need
|
||||
for some way to trigger delays and/or errors. This can be done via the
|
||||
C<forced_gofer_random> configuration item, or else the DBI_GOFER_RANDOM environment
|
||||
variable.
|
||||
|
||||
=head2 DBI_GOFER_RANDOM
|
||||
|
||||
The value of the C<forced_gofer_random> configuration item (or else the
|
||||
DBI_GOFER_RANDOM environment variable) is treated as a series of tokens
|
||||
separated by commas.
|
||||
|
||||
The tokens can be one of three types:
|
||||
|
||||
=over 4
|
||||
|
||||
=item fail=R%
|
||||
|
||||
Set the current failure rate to R where R is a percentage.
|
||||
The value R can be floating point, e.g., C<fail=0.05%>.
|
||||
Negative values for R have special meaning, see below.
|
||||
|
||||
=item err=N
|
||||
|
||||
Sets the current failure err value to N (instead of the DBI's default 'standard
|
||||
err value' of 2000000000). This is useful when you want to simulate a
|
||||
specific error.
|
||||
|
||||
=item delayN=R%
|
||||
|
||||
Set the current random delay rate to R where R is a percentage, and set the
|
||||
current delay duration to N seconds. The values of R and N can be floating point,
|
||||
e.g., C<delay0.5=0.2%>. Negative values for R have special meaning, see below.
|
||||
|
||||
If R is an odd number (R % 2 == 1) then a message is logged via warn() which
|
||||
will be returned to, and echoed at, the client.
|
||||
|
||||
=item methodname
|
||||
|
||||
Applies the current fail, err, and delay values to the named method.
|
||||
If neither a fail nor delay have been set yet then a warning is generated.
|
||||
|
||||
=back
|
||||
|
||||
For example:
|
||||
|
||||
$executor = DBI::Gofer::Execute->new( {
|
||||
forced_gofer_random => "fail=0.01%,do,delay60=1%,execute",
|
||||
});
|
||||
|
||||
will cause the do() method to fail for 0.01% of calls, and the execute() method to
|
||||
fail 0.01% of calls and be delayed by 60 seconds on 1% of calls.
|
||||
|
||||
If the percentage value (C<R>) is negative then instead of the failures being
|
||||
triggered randomly (via the rand() function) they are triggered via a sequence
|
||||
number. In other words "C<fail=-20%>" will mean every fifth call will fail.
|
||||
Each method has a distinct sequence number.
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Tim Bunce, L<http://www.tim.bunce.name>
|
||||
|
||||
=head1 LICENCE AND COPYRIGHT
|
||||
|
||||
Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
|
||||
|
||||
This module is free software; you can redistribute it and/or
|
||||
modify it under the same terms as Perl itself. See L<perlartistic>.
|
||||
|
||||
=cut
|
|
@ -0,0 +1,200 @@
|
|||
package DBI::Gofer::Request;
|
||||
|
||||
# $Id: Request.pm 12536 2009-02-24 22:37:09Z Tim $
|
||||
#
|
||||
# Copyright (c) 2007, Tim Bunce, Ireland
|
||||
#
|
||||
# You may distribute under the terms of either the GNU General Public
|
||||
# License or the Artistic License, as specified in the Perl README file.
|
||||
|
||||
use strict;
|
||||
|
||||
use DBI qw(neat neat_list);
|
||||
|
||||
use base qw(DBI::Util::_accessor);
|
||||
|
||||
our $VERSION = "0.012537";
|
||||
|
||||
use constant GOf_REQUEST_IDEMPOTENT => 0x0001;
|
||||
use constant GOf_REQUEST_READONLY => 0x0002;
|
||||
|
||||
our @EXPORT = qw(GOf_REQUEST_IDEMPOTENT GOf_REQUEST_READONLY);
|
||||
|
||||
|
||||
__PACKAGE__->mk_accessors(qw(
|
||||
version
|
||||
flags
|
||||
dbh_connect_call
|
||||
dbh_method_call
|
||||
dbh_attributes
|
||||
dbh_last_insert_id_args
|
||||
sth_method_calls
|
||||
sth_result_attr
|
||||
));
|
||||
__PACKAGE__->mk_accessors_using(make_accessor_autoviv_hashref => qw(
|
||||
meta
|
||||
));
|
||||
|
||||
|
||||
sub new {
|
||||
my ($self, $args) = @_;
|
||||
$args->{version} ||= $VERSION;
|
||||
return $self->SUPER::new($args);
|
||||
}
|
||||
|
||||
|
||||
sub reset {
|
||||
my ($self, $flags) = @_;
|
||||
# remove everything except connect and version
|
||||
%$self = (
|
||||
version => $self->{version},
|
||||
dbh_connect_call => $self->{dbh_connect_call},
|
||||
);
|
||||
$self->{flags} = $flags if $flags;
|
||||
}
|
||||
|
||||
|
||||
sub init_request {
|
||||
my ($self, $method_and_args, $dbh) = @_;
|
||||
$self->reset( $dbh->{ReadOnly} ? GOf_REQUEST_READONLY : 0 );
|
||||
$self->dbh_method_call($method_and_args);
|
||||
}
|
||||
|
||||
|
||||
sub is_sth_request {
|
||||
return shift->{sth_result_attr};
|
||||
}
|
||||
|
||||
|
||||
sub statements {
|
||||
my $self = shift;
|
||||
my @statements;
|
||||
if (my $dbh_method_call = $self->dbh_method_call) {
|
||||
my $statement_method_regex = qr/^(?:do|prepare)$/;
|
||||
my (undef, $method, $arg1) = @$dbh_method_call;
|
||||
push @statements, $arg1 if $method && $method =~ $statement_method_regex;
|
||||
}
|
||||
return @statements;
|
||||
}
|
||||
|
||||
|
||||
sub is_idempotent {
|
||||
my $self = shift;
|
||||
|
||||
if (my $flags = $self->flags) {
|
||||
return 1 if $flags & (GOf_REQUEST_IDEMPOTENT|GOf_REQUEST_READONLY);
|
||||
}
|
||||
|
||||
# else check if all statements are SELECT statement that don't include FOR UPDATE
|
||||
my @statements = $self->statements;
|
||||
# XXX this is very minimal for now, doesn't even allow comments before the select
|
||||
# (and can't ever work for "exec stored_procedure_name" kinds of statements)
|
||||
# XXX it also doesn't deal with multiple statements: prepare("select foo; update bar")
|
||||
return 1 if @statements == grep {
|
||||
m/^ \s* SELECT \b /xmsi && !m/ \b FOR \s+ UPDATE \b /xmsi
|
||||
} @statements;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
sub summary_as_text {
|
||||
my $self = shift;
|
||||
my ($context) = @_;
|
||||
my @s = '';
|
||||
|
||||
if ($context && %$context) {
|
||||
my @keys = sort keys %$context;
|
||||
push @s, join(", ", map { "$_=>".$context->{$_} } @keys);
|
||||
}
|
||||
|
||||
my ($method, $dsn, $user, $pass, $attr) = @{ $self->dbh_connect_call };
|
||||
$method ||= 'connect_cached';
|
||||
$pass = '***' if defined $pass;
|
||||
my $tmp = '';
|
||||
if ($attr) {
|
||||
$tmp = { %{$attr||{}} }; # copy so we can edit
|
||||
$tmp->{Password} = '***' if exists $tmp->{Password};
|
||||
$tmp = "{ ".neat_list([ %$tmp ])." }";
|
||||
}
|
||||
push @s, sprintf "dbh= $method(%s, %s)", neat_list([$dsn, $user, $pass]), $tmp;
|
||||
|
||||
if (my $flags = $self->flags) {
|
||||
push @s, sprintf "flags: 0x%x", $flags;
|
||||
}
|
||||
|
||||
if (my $dbh_attr = $self->dbh_attributes) {
|
||||
push @s, sprintf "dbh->FETCH: %s", @$dbh_attr
|
||||
if @$dbh_attr;
|
||||
}
|
||||
|
||||
my ($wantarray, $meth, @args) = @{ $self->dbh_method_call };
|
||||
my $args = neat_list(\@args);
|
||||
$args =~ s/\n+/ /g;
|
||||
push @s, sprintf "dbh->%s(%s)", $meth, $args;
|
||||
|
||||
if (my $lii_args = $self->dbh_last_insert_id_args) {
|
||||
push @s, sprintf "dbh->last_insert_id(%s)", neat_list($lii_args);
|
||||
}
|
||||
|
||||
for my $call (@{ $self->sth_method_calls || [] }) {
|
||||
my ($meth, @args) = @$call;
|
||||
($args = neat_list(\@args)) =~ s/\n+/ /g;
|
||||
push @s, sprintf "sth->%s(%s)", $meth, $args;
|
||||
}
|
||||
|
||||
if (my $sth_attr = $self->sth_result_attr) {
|
||||
push @s, sprintf "sth->FETCH: %s", %$sth_attr
|
||||
if %$sth_attr;
|
||||
}
|
||||
|
||||
return join("\n\t", @s) . "\n";
|
||||
}
|
||||
|
||||
|
||||
sub outline_as_text { # one-line version of summary_as_text
|
||||
my $self = shift;
|
||||
my @s = '';
|
||||
my $neatlen = 80;
|
||||
|
||||
if (my $flags = $self->flags) {
|
||||
push @s, sprintf "flags=0x%x", $flags;
|
||||
}
|
||||
|
||||
my (undef, $meth, @args) = @{ $self->dbh_method_call };
|
||||
push @s, sprintf "%s(%s)", $meth, neat_list(\@args, $neatlen);
|
||||
|
||||
for my $call (@{ $self->sth_method_calls || [] }) {
|
||||
my ($meth, @args) = @$call;
|
||||
push @s, sprintf "%s(%s)", $meth, neat_list(\@args, $neatlen);
|
||||
}
|
||||
|
||||
my ($method, $dsn) = @{ $self->dbh_connect_call };
|
||||
push @s, "$method($dsn,...)"; # dsn last as it's usually less interesting
|
||||
|
||||
(my $outline = join("; ", @s)) =~ s/\s+/ /g; # squish whitespace, incl newlines
|
||||
return $outline;
|
||||
}
|
||||
|
||||
1;
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBI::Gofer::Request - Encapsulate a request from DBD::Gofer to DBI::Gofer::Execute
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
This is an internal class.
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Tim Bunce, L<http://www.tim.bunce.name>
|
||||
|
||||
=head1 LICENCE AND COPYRIGHT
|
||||
|
||||
Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
|
||||
|
||||
This module is free software; you can redistribute it and/or
|
||||
modify it under the same terms as Perl itself. See L<perlartistic>.
|
||||
|
||||
=cut
|
|
@ -0,0 +1,218 @@
|
|||
package DBI::Gofer::Response;
|
||||
|
||||
# $Id: Response.pm 11565 2008-07-22 20:17:33Z Tim $
|
||||
#
|
||||
# Copyright (c) 2007, Tim Bunce, Ireland
|
||||
#
|
||||
# You may distribute under the terms of either the GNU General Public
|
||||
# License or the Artistic License, as specified in the Perl README file.
|
||||
|
||||
use strict;
|
||||
|
||||
use Carp;
|
||||
use DBI qw(neat neat_list);
|
||||
|
||||
use base qw(DBI::Util::_accessor Exporter);
|
||||
|
||||
our $VERSION = "0.011566";
|
||||
|
||||
use constant GOf_RESPONSE_EXECUTED => 0x0001;
|
||||
|
||||
our @EXPORT = qw(GOf_RESPONSE_EXECUTED);
|
||||
|
||||
|
||||
__PACKAGE__->mk_accessors(qw(
|
||||
version
|
||||
rv
|
||||
err
|
||||
errstr
|
||||
state
|
||||
flags
|
||||
last_insert_id
|
||||
dbh_attributes
|
||||
sth_resultsets
|
||||
warnings
|
||||
));
|
||||
__PACKAGE__->mk_accessors_using(make_accessor_autoviv_hashref => qw(
|
||||
meta
|
||||
));
|
||||
|
||||
|
||||
sub new {
|
||||
my ($self, $args) = @_;
|
||||
$args->{version} ||= $VERSION;
|
||||
chomp $args->{errstr} if $args->{errstr};
|
||||
return $self->SUPER::new($args);
|
||||
}
|
||||
|
||||
|
||||
sub err_errstr_state {
|
||||
my $self = shift;
|
||||
return @{$self}{qw(err errstr state)};
|
||||
}
|
||||
|
||||
sub executed_flag_set {
|
||||
my $flags = shift->flags
|
||||
or return 0;
|
||||
return $flags & GOf_RESPONSE_EXECUTED;
|
||||
}
|
||||
|
||||
|
||||
sub add_err {
|
||||
my ($self, $err, $errstr, $state, $trace) = @_;
|
||||
|
||||
# acts like the DBI's set_err method.
|
||||
# this code copied from DBI::PurePerl's set_err method.
|
||||
|
||||
chomp $errstr if $errstr;
|
||||
$state ||= '';
|
||||
carp ref($self)."->add_err($err, $errstr, $state)"
|
||||
if $trace and defined($err) || $errstr;
|
||||
|
||||
my ($r_err, $r_errstr, $r_state) = ($self->{err}, $self->{errstr}, $self->{state});
|
||||
|
||||
if ($r_errstr) {
|
||||
$r_errstr .= sprintf " [err was %s now %s]", $r_err, $err
|
||||
if $r_err && $err && $r_err ne $err;
|
||||
$r_errstr .= sprintf " [state was %s now %s]", $r_state, $state
|
||||
if $r_state and $r_state ne "S1000" && $state && $r_state ne $state;
|
||||
$r_errstr .= "\n$errstr" if $r_errstr ne $errstr;
|
||||
}
|
||||
else {
|
||||
$r_errstr = $errstr;
|
||||
}
|
||||
|
||||
# assign if higher priority: err > "0" > "" > undef
|
||||
my $err_changed;
|
||||
if ($err # new error: so assign
|
||||
or !defined $r_err # no existing warn/info: so assign
|
||||
# new warn ("0" len 1) > info ("" len 0): so assign
|
||||
or defined $err && length($err) > length($r_err)
|
||||
) {
|
||||
$r_err = $err;
|
||||
++$err_changed;
|
||||
}
|
||||
|
||||
$r_state = ($state eq "00000") ? "" : $state
|
||||
if $state && $err_changed;
|
||||
|
||||
($self->{err}, $self->{errstr}, $self->{state}) = ($r_err, $r_errstr, $r_state);
|
||||
|
||||
return undef;
|
||||
}
|
||||
|
||||
|
||||
sub summary_as_text {
|
||||
my $self = shift;
|
||||
my ($context) = @_;
|
||||
|
||||
my ($rv, $err, $errstr, $state) = ($self->{rv}, $self->{err}, $self->{errstr}, $self->{state});
|
||||
|
||||
my @s = sprintf("\trv=%s", (ref $rv) ? "[".neat_list($rv)."]" : neat($rv));
|
||||
$s[-1] .= sprintf(", err=%s, errstr=%s", $err, neat($errstr))
|
||||
if defined $err;
|
||||
$s[-1] .= sprintf(", flags=0x%x", $self->{flags})
|
||||
if defined $self->{flags};
|
||||
|
||||
push @s, "last_insert_id=%s", $self->last_insert_id
|
||||
if defined $self->last_insert_id;
|
||||
|
||||
if (my $dbh_attr = $self->dbh_attributes) {
|
||||
my @keys = sort keys %$dbh_attr;
|
||||
push @s, sprintf "dbh= { %s }", join(", ", map { "$_=>".neat($dbh_attr->{$_},100) } @keys)
|
||||
if @keys;
|
||||
}
|
||||
|
||||
for my $rs (@{$self->sth_resultsets || []}) {
|
||||
my ($rowset, $err, $errstr, $state)
|
||||
= @{$rs}{qw(rowset err errstr state)};
|
||||
my $summary = "rowset: ";
|
||||
my $NUM_OF_FIELDS = $rs->{NUM_OF_FIELDS} || 0;
|
||||
my $rows = $rowset ? @$rowset : 0;
|
||||
if ($rowset || $NUM_OF_FIELDS > 0) {
|
||||
$summary .= sprintf "%d rows, %d columns", $rows, $NUM_OF_FIELDS;
|
||||
}
|
||||
$summary .= sprintf ", err=%s, errstr=%s", $err, neat($errstr) if defined $err;
|
||||
if ($rows) {
|
||||
my $NAME = $rs->{NAME};
|
||||
# generate a brief name=value summary of the first row's columns
|
||||
my @colinfo = map { "$NAME->[$_]=".neat($rowset->[0][$_], 30) } 0..@{$NAME}-1;
|
||||
$summary .= sprintf " [%s]", join ", ", @colinfo;
|
||||
$summary .= ",..." if $rows > 1;
|
||||
# we can be a little more helpful for Sybase/MSSQL users
|
||||
$summary .= " syb_result_type=$rs->{syb_result_type}"
|
||||
if $rs->{syb_result_type} and $rs->{syb_result_type} != 4040;
|
||||
}
|
||||
push @s, $summary;
|
||||
}
|
||||
for my $w (@{$self->warnings || []}) {
|
||||
chomp $w;
|
||||
push @s, "warning: $w";
|
||||
}
|
||||
if ($context && %$context) {
|
||||
my @keys = sort keys %$context;
|
||||
push @s, join(", ", map { "$_=>".$context->{$_} } @keys);
|
||||
}
|
||||
return join("\n\t", @s). "\n";
|
||||
}
|
||||
|
||||
|
||||
sub outline_as_text { # one-line version of summary_as_text
|
||||
my $self = shift;
|
||||
my ($context) = @_;
|
||||
|
||||
my ($rv, $err, $errstr, $state) = ($self->{rv}, $self->{err}, $self->{errstr}, $self->{state});
|
||||
|
||||
my $s = sprintf("rv=%s", (ref $rv) ? "[".neat_list($rv)."]" : neat($rv));
|
||||
$s .= sprintf(", err=%s %s", $err, neat($errstr))
|
||||
if defined $err;
|
||||
$s .= sprintf(", flags=0x%x", $self->{flags})
|
||||
if $self->{flags};
|
||||
|
||||
if (my $sth_resultsets = $self->sth_resultsets) {
|
||||
$s .= sprintf(", %d resultsets ", scalar @$sth_resultsets);
|
||||
|
||||
my @rs;
|
||||
for my $rs (@{$self->sth_resultsets || []}) {
|
||||
my $summary = "";
|
||||
my ($rowset, $err, $errstr)
|
||||
= @{$rs}{qw(rowset err errstr)};
|
||||
my $NUM_OF_FIELDS = $rs->{NUM_OF_FIELDS} || 0;
|
||||
my $rows = $rowset ? @$rowset : 0;
|
||||
if ($rowset || $NUM_OF_FIELDS > 0) {
|
||||
$summary .= sprintf "%dr x %dc", $rows, $NUM_OF_FIELDS;
|
||||
}
|
||||
$summary .= sprintf "%serr %s %s", ($summary?", ":""), $err, neat($errstr)
|
||||
if defined $err;
|
||||
push @rs, $summary;
|
||||
}
|
||||
$s .= join "; ", map { "[$_]" } @rs;
|
||||
}
|
||||
|
||||
return $s;
|
||||
}
|
||||
|
||||
|
||||
1;
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBI::Gofer::Response - Encapsulate a response from DBI::Gofer::Execute to DBD::Gofer
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
This is an internal class.
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Tim Bunce, L<http://www.tim.bunce.name>
|
||||
|
||||
=head1 LICENCE AND COPYRIGHT
|
||||
|
||||
Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
|
||||
|
||||
This module is free software; you can redistribute it and/or
|
||||
modify it under the same terms as Perl itself. See L<perlartistic>.
|
||||
|
||||
=cut
|
||||
|
|
@ -0,0 +1,64 @@
|
|||
package DBI::Gofer::Serializer::Base;
|
||||
|
||||
# $Id: Base.pm 9949 2007-09-18 09:38:15Z Tim $
|
||||
#
|
||||
# Copyright (c) 2007, Tim Bunce, Ireland
|
||||
#
|
||||
# You may distribute under the terms of either the GNU General Public
|
||||
# License or the Artistic License, as specified in the Perl README file.
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBI::Gofer::Serializer::Base - base class for Gofer serialization
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
$serializer = $serializer_class->new();
|
||||
|
||||
$string = $serializer->serialize( $data );
|
||||
($string, $deserializer_class) = $serializer->serialize( $data );
|
||||
|
||||
$data = $serializer->deserialize( $string );
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
DBI::Gofer::Serializer::* classes implement a very minimal subset of the L<Data::Serializer> API.
|
||||
|
||||
Gofer serializers are expected to be very fast and are not required to deal
|
||||
with anything other than non-blessed references to arrays and hashes, and plain scalars.
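
As a rough illustration, a hypothetical subclass (not part of the DBI distribution) only
needs to provide serialize() and deserialize(); the deserializer_class slot is filled in
by this base class's new():

    package DBI::Gofer::Serializer::JSON;   # hypothetical example

    use strict;
    use warnings;
    use JSON::PP qw(encode_json decode_json);
    use base qw(DBI::Gofer::Serializer::Base);

    sub serialize {
        my $self = shift;
        my $frozen = encode_json(shift);    # plain scalars, array and hash refs only
        return $frozen unless wantarray;
        return ($frozen, $self->{deserializer_class});
    }

    sub deserialize {
        my $self = shift;
        return decode_json(shift);
    }

    1;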
|
||||
|
||||
=cut
|
||||
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use Carp qw(croak);
|
||||
|
||||
our $VERSION = "0.009950";
|
||||
|
||||
|
||||
sub new {
|
||||
my $class = shift;
|
||||
my $deserializer_class = $class->deserializer_class;
|
||||
return bless { deserializer_class => $deserializer_class } => $class;
|
||||
}
|
||||
|
||||
sub deserializer_class {
|
||||
my $self = shift;
|
||||
my $class = ref($self) || $self;
|
||||
$class =~ s/^DBI::Gofer::Serializer:://;
|
||||
return $class;
|
||||
}
|
||||
|
||||
sub serialize {
|
||||
my $self = shift;
|
||||
croak ref($self)." has not implemented the serialize method";
|
||||
}
|
||||
|
||||
sub deserialize {
|
||||
my $self = shift;
|
||||
croak ref($self)." has not implemented the deserialize method";
|
||||
}
|
||||
|
||||
1;
|
|
@ -0,0 +1,53 @@
|
|||
package DBI::Gofer::Serializer::DataDumper;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
our $VERSION = "0.009950";
|
||||
|
||||
# $Id: DataDumper.pm 9949 2007-09-18 09:38:15Z Tim $
|
||||
#
|
||||
# Copyright (c) 2007, Tim Bunce, Ireland
|
||||
#
|
||||
# You may distribute under the terms of either the GNU General Public
|
||||
# License or the Artistic License, as specified in the Perl README file.
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBI::Gofer::Serializer::DataDumper - Gofer serialization using DataDumper
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
$serializer = DBI::Gofer::Serializer::DataDumper->new();
|
||||
|
||||
$string = $serializer->serialize( $data );
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
Uses L<Data::Dumper> to serialize. Deserialization is not supported.
|
||||
The output of this class is only meant for human consumption.
|
||||
|
||||
See also L<DBI::Gofer::Serializer::Base>.
|
||||
|
||||
=cut
|
||||
|
||||
use Data::Dumper;
|
||||
|
||||
use base qw(DBI::Gofer::Serializer::Base);
|
||||
|
||||
|
||||
sub serialize {
|
||||
my $self = shift;
|
||||
local $Data::Dumper::Indent = 1;
|
||||
local $Data::Dumper::Terse = 1;
|
||||
local $Data::Dumper::Useqq = 0; # enabling this disables xs
|
||||
local $Data::Dumper::Sortkeys = 1;
|
||||
local $Data::Dumper::Quotekeys = 0;
|
||||
local $Data::Dumper::Deparse = 0;
|
||||
local $Data::Dumper::Purity = 0;
|
||||
my $frozen = Data::Dumper::Dumper(shift);
|
||||
return $frozen unless wantarray;
|
||||
return ($frozen, $self->{deserializer_class});
|
||||
}
|
||||
|
||||
1;
|
|
@ -0,0 +1,60 @@
|
|||
package DBI::Gofer::Serializer::Storable;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use base qw(DBI::Gofer::Serializer::Base);
|
||||
|
||||
# $Id: Storable.pm 15585 2013-03-22 20:31:22Z Tim $
|
||||
#
|
||||
# Copyright (c) 2007, Tim Bunce, Ireland
|
||||
#
|
||||
# You may distribute under the terms of either the GNU General Public
|
||||
# License or the Artistic License, as specified in the Perl README file.
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBI::Gofer::Serializer::Storable - Gofer serialization using Storable
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
$serializer = DBI::Gofer::Serializer::Storable->new();
|
||||
|
||||
$string = $serializer->serialize( $data );
|
||||
($string, $deserializer_class) = $serializer->serialize( $data );
|
||||
|
||||
$data = $serializer->deserialize( $string );
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
Uses Storable::nfreeze() to serialize and Storable::thaw() to deserialize.
|
||||
|
||||
The serialize() method sets local $Storable::forgive_me = 1; so it doesn't
|
||||
croak if it encounters any data types that can't be serialized, such as code refs.
|
||||
|
||||
See also L<DBI::Gofer::Serializer::Base>.
|
||||
|
||||
=cut
|
||||
|
||||
use Storable qw(nfreeze thaw);
|
||||
|
||||
our $VERSION = "0.015586";
|
||||
|
||||
use base qw(DBI::Gofer::Serializer::Base);
|
||||
|
||||
|
||||
sub serialize {
|
||||
my $self = shift;
|
||||
local $Storable::forgive_me = 1; # for CODE refs etc
|
||||
local $Storable::canonical = 1; # for go_cache
|
||||
my $frozen = nfreeze(shift);
|
||||
return $frozen unless wantarray;
|
||||
return ($frozen, $self->{deserializer_class});
|
||||
}
|
||||
|
||||
sub deserialize {
|
||||
my $self = shift;
|
||||
return thaw(shift);
|
||||
}
|
||||
|
||||
1;
|
|
@ -0,0 +1,174 @@
|
|||
package DBI::Gofer::Transport::Base;
|
||||
|
||||
# $Id: Base.pm 12536 2009-02-24 22:37:09Z Tim $
|
||||
#
|
||||
# Copyright (c) 2007, Tim Bunce, Ireland
|
||||
#
|
||||
# You may distribute under the terms of either the GNU General Public
|
||||
# License or the Artistic License, as specified in the Perl README file.
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use DBI;
|
||||
|
||||
use base qw(DBI::Util::_accessor);
|
||||
|
||||
use DBI::Gofer::Serializer::Storable;
|
||||
use DBI::Gofer::Serializer::DataDumper;
|
||||
|
||||
our $VERSION = "0.012537";
|
||||
|
||||
__PACKAGE__->mk_accessors(qw(
|
||||
trace
|
||||
keep_meta_frozen
|
||||
serializer_obj
|
||||
));
|
||||
|
||||
|
||||
# see also $ENV{DBI_GOFER_TRACE} in DBI::Gofer::Execute
|
||||
sub _init_trace { (split(/=/,$ENV{DBI_GOFER_TRACE}||0))[0] }
|
||||
|
||||
|
||||
sub new {
|
||||
my ($class, $args) = @_;
|
||||
$args->{trace} ||= $class->_init_trace;
|
||||
$args->{serializer_obj} ||= DBI::Gofer::Serializer::Storable->new();
|
||||
my $self = bless {}, $class;
|
||||
$self->$_( $args->{$_} ) for keys %$args;
|
||||
$self->trace_msg("$class->new({ @{[ %$args ]} })\n") if $self->trace;
|
||||
return $self;
|
||||
}
|
||||
|
||||
my $packet_header_text = "GoFER1:";
|
||||
my $packet_header_regex = qr/^GoFER(\d+):/;
|
||||
|
||||
|
||||
sub _freeze_data {
|
||||
my ($self, $data, $serializer, $skip_trace) = @_;
|
||||
my $frozen = eval {
|
||||
$self->_dump("freezing $self->{trace} ".ref($data), $data)
|
||||
if !$skip_trace and $self->trace;
|
||||
|
||||
local $data->{meta}; # don't include meta in serialization
|
||||
$serializer ||= $self->{serializer_obj};
|
||||
my ($data, $deserializer_class) = $serializer->serialize($data);
|
||||
|
||||
$packet_header_text . $data;
|
||||
};
|
||||
if ($@) {
|
||||
chomp $@;
|
||||
die "Error freezing ".ref($data)." object: $@";
|
||||
}
|
||||
|
||||
# stash the frozen data into the data structure itself
|
||||
# to make life easy for the client caching code in DBD::Gofer::Transport::Base
|
||||
$data->{meta}{frozen} = $frozen if $self->keep_meta_frozen;
|
||||
|
||||
return $frozen;
|
||||
}
|
||||
# public aliases used by subclasses
|
||||
*freeze_request = \&_freeze_data;
|
||||
*freeze_response = \&_freeze_data;
|
||||
|
||||
|
||||
sub _thaw_data {
|
||||
my ($self, $frozen_data, $serializer, $skip_trace) = @_;
|
||||
my $data;
|
||||
eval {
|
||||
# check for and extract our gofer header and the info it contains
|
||||
(my $frozen = $frozen_data) =~ s/$packet_header_regex//o
|
||||
or die "does not have gofer header\n";
|
||||
my ($t_version) = $1;
|
||||
$serializer ||= $self->{serializer_obj};
|
||||
$data = $serializer->deserialize($frozen);
|
||||
die ref($serializer)."->deserialize didn't return a reference"
|
||||
unless ref $data;
|
||||
$data->{_transport}{version} = $t_version;
|
||||
|
||||
$data->{meta}{frozen} = $frozen_data if $self->keep_meta_frozen;
|
||||
};
|
||||
if ($@) {
|
||||
chomp(my $err = $@);
|
||||
# remove extra noise from Storable
|
||||
$err =~ s{ at \S+?/Storable.pm \(autosplit into \S+?/Storable/thaw.al\) line \d+(, \S+ line \d+)?}{};
|
||||
my $msg = sprintf "Error thawing: %s (data=%s)", $err, DBI::neat($frozen_data,50);
|
||||
Carp::cluck("$msg, pid $$ stack trace follows:"); # XXX if $self->trace;
|
||||
die $msg;
|
||||
}
|
||||
$self->_dump("thawing $self->{trace} ".ref($data), $data)
|
||||
if !$skip_trace and $self->trace;
|
||||
|
||||
return $data;
|
||||
}
|
||||
# public aliases used by subclasses
|
||||
*thaw_request = \&_thaw_data;
|
||||
*thaw_response = \&_thaw_data;
|
||||
|
||||
|
||||
# this should probably live in the request and response classes
|
||||
# and the trace level passed in
|
||||
sub _dump {
|
||||
my ($self, $label, $data) = @_;
|
||||
|
||||
# don't dump the binary
|
||||
local $data->{meta}{frozen} if $data->{meta} && $data->{meta}{frozen};
|
||||
|
||||
my $trace_level = $self->trace;
|
||||
my $summary;
|
||||
if ($trace_level >= 4) {
|
||||
require Data::Dumper;
|
||||
local $Data::Dumper::Indent = 1;
|
||||
local $Data::Dumper::Terse = 1;
|
||||
local $Data::Dumper::Useqq = 0;
|
||||
local $Data::Dumper::Sortkeys = 1;
|
||||
local $Data::Dumper::Quotekeys = 0;
|
||||
local $Data::Dumper::Deparse = 0;
|
||||
local $Data::Dumper::Purity = 0;
|
||||
$summary = Data::Dumper::Dumper($data);
|
||||
}
|
||||
elsif ($trace_level >= 2) {
|
||||
$summary = eval { $data->summary_as_text } || $@ || "no summary available\n";
|
||||
}
|
||||
else {
|
||||
$summary = eval { $data->outline_as_text."\n" } || $@ || "no summary available\n";
|
||||
}
|
||||
$self->trace_msg("$label: $summary");
|
||||
}
|
||||
|
||||
|
||||
sub trace_msg {
|
||||
my ($self, $msg, $min_level) = @_;
|
||||
$min_level = 1 unless defined $min_level;
|
||||
# transport trace level can override DBI's trace level
|
||||
$min_level = 0 if $self->trace >= $min_level;
|
||||
return DBI->trace_msg("gofer ".$msg, $min_level);
|
||||
}
|
||||
|
||||
1;
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBI::Gofer::Transport::Base - Base class for Gofer transports
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
This is the base class for server-side Gofer transports.
|
||||
|
||||
It's also the base class for the client-side base class L<DBD::Gofer::Transport::Base>.
|
||||
|
||||
This is an internal class.
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Tim Bunce, L<http://www.tim.bunce.name>
|
||||
|
||||
=head1 LICENCE AND COPYRIGHT
|
||||
|
||||
Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
|
||||
|
||||
This module is free software; you can redistribute it and/or
|
||||
modify it under the same terms as Perl itself. See L<perlartistic>.
|
||||
|
||||
=cut
|
||||
|
|
@ -0,0 +1,64 @@
|
|||
package DBI::Gofer::Transport::pipeone;
|
||||
|
||||
# $Id: pipeone.pm 12536 2009-02-24 22:37:09Z Tim $
|
||||
#
|
||||
# Copyright (c) 2007, Tim Bunce, Ireland
|
||||
#
|
||||
# You may distribute under the terms of either the GNU General Public
|
||||
# License or the Artistic License, as specified in the Perl README file.
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use DBI::Gofer::Execute;
|
||||
|
||||
use base qw(DBI::Gofer::Transport::Base Exporter);
|
||||
|
||||
our $VERSION = "0.012537";
|
||||
|
||||
our @EXPORT = qw(run_one_stdio);
|
||||
|
||||
my $executor = DBI::Gofer::Execute->new();
|
||||
|
||||
sub run_one_stdio {
|
||||
|
||||
binmode STDIN;
|
||||
binmode STDOUT;
|
||||
|
||||
my $transport = DBI::Gofer::Transport::pipeone->new();
|
||||
|
||||
my $frozen_request = do { local $/; <STDIN> };
|
||||
|
||||
my $response = $executor->execute_request( $transport->thaw_request($frozen_request) );
|
||||
|
||||
my $frozen_response = $transport->freeze_response($response);
|
||||
|
||||
print $frozen_response;
|
||||
|
||||
# no point calling $executor->update_stats(...) for pipeONE
|
||||
}
|
||||
|
||||
1;
|
||||
__END__
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBI::Gofer::Transport::pipeone - DBD::Gofer server-side transport for pipeone
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
See L<DBD::Gofer::Transport::pipeone>.
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Tim Bunce, L<http://www.tim.bunce.name>
|
||||
|
||||
=head1 LICENCE AND COPYRIGHT
|
||||
|
||||
Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
|
||||
|
||||
This module is free software; you can redistribute it and/or
|
||||
modify it under the same terms as Perl itself. See L<perlartistic>.
|
||||
|
||||
=cut
|
||||
|
|
@ -0,0 +1,76 @@
|
|||
package DBI::Gofer::Transport::stream;
|
||||
|
||||
# $Id: stream.pm 12536 2009-02-24 22:37:09Z Tim $
|
||||
#
|
||||
# Copyright (c) 2007, Tim Bunce, Ireland
|
||||
#
|
||||
# You may distribute under the terms of either the GNU General Public
|
||||
# License or the Artistic License, as specified in the Perl README file.
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use DBI qw(dbi_time);
|
||||
use DBI::Gofer::Execute;
|
||||
|
||||
use base qw(DBI::Gofer::Transport::pipeone Exporter);
|
||||
|
||||
our $VERSION = "0.012537";
|
||||
|
||||
our @EXPORT = qw(run_stdio_hex);
|
||||
|
||||
my $executor = DBI::Gofer::Execute->new();
|
||||
|
||||
sub run_stdio_hex {
|
||||
|
||||
my $transport = DBI::Gofer::Transport::stream->new();
|
||||
local $| = 1;
|
||||
|
||||
DBI->trace_msg("$0 started (pid $$)\n");
|
||||
|
||||
local $\; # OUTPUT_RECORD_SEPARATOR
|
||||
local $/ = "\012"; # INPUT_RECORD_SEPARATOR
|
||||
while ( defined( my $encoded_request = <STDIN> ) ) {
|
||||
my $time_received = dbi_time();
|
||||
$encoded_request =~ s/\015?\012$//;
|
||||
|
||||
my $frozen_request = pack "H*", $encoded_request;
|
||||
my $request = $transport->thaw_request( $frozen_request );
|
||||
|
||||
my $response = $executor->execute_request( $request );
|
||||
|
||||
my $frozen_response = $transport->freeze_response($response);
|
||||
my $encoded_response = unpack "H*", $frozen_response;
|
||||
|
||||
print $encoded_response, "\015\012"; # autoflushed due to $|=1
|
||||
|
||||
# there's no way to access the stats currently
|
||||
# so this just serves as a basic test and illustration of update_stats()
|
||||
$executor->update_stats($request, $response, $frozen_request, $frozen_response, $time_received, 1);
|
||||
}
|
||||
DBI->trace_msg("$0 ending (pid $$)\n");
|
||||
}
|
||||
|
||||
1;
|
||||
__END__
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBI::Gofer::Transport::stream - DBD::Gofer server-side transport for stream
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
See L<DBD::Gofer::Transport::stream>.
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Tim Bunce, L<http://www.tim.bunce.name>
|
||||
|
||||
=head1 LICENCE AND COPYRIGHT
|
||||
|
||||
Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
|
||||
|
||||
This module is free software; you can redistribute it and/or
|
||||
modify it under the same terms as Perl itself. See L<perlartistic>.
|
||||
|
||||
=cut
|
|
@ -0,0 +1,954 @@
|
|||
package DBI::Profile;
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBI::Profile - Performance profiling and benchmarking for the DBI
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
The easiest way to enable DBI profiling is to set the DBI_PROFILE
|
||||
environment variable to 2 and then run your code as usual:
|
||||
|
||||
DBI_PROFILE=2 prog.pl
|
||||
|
||||
This will profile your program and then output a textual summary
|
||||
grouped by query when the program exits. You can also enable profiling by
|
||||
setting the Profile attribute of any DBI handle:
|
||||
|
||||
$dbh->{Profile} = 2;
|
||||
|
||||
Then the summary will be printed when the handle is destroyed.
|
||||
|
||||
Many other values, apart from C<2>, are possible - see L<"ENABLING A PROFILE"> below.
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
The DBI::Profile module provides a simple interface to collect and
|
||||
report performance and benchmarking data from the DBI.
|
||||
|
||||
For a more elaborate interface, suitable for larger programs, see
|
||||
L<DBI::ProfileDumper|DBI::ProfileDumper> and L<dbiprof|dbiprof>.
|
||||
For Apache/mod_perl applications see
|
||||
L<DBI::ProfileDumper::Apache|DBI::ProfileDumper::Apache>.
|
||||
|
||||
=head1 OVERVIEW
|
||||
|
||||
Performance data collection for the DBI is built around several
|
||||
concepts which are important to understand clearly.
|
||||
|
||||
=over 4
|
||||
|
||||
=item Method Dispatch
|
||||
|
||||
Every method call on a DBI handle passes through a single 'dispatch'
|
||||
function which manages all the common aspects of DBI method calls,
|
||||
such as handling the RaiseError attribute.
|
||||
|
||||
=item Data Collection
|
||||
|
||||
If profiling is enabled for a handle then the dispatch code takes
|
||||
a high-resolution timestamp soon after it is entered. Then, after
|
||||
calling the appropriate method and just before returning, it takes
|
||||
another high-resolution timestamp and calls a function to record
|
||||
the information. That function is passed the two timestamps
|
||||
plus the DBI handle and the name of the method that was called.
|
||||
That data about a single DBI method call is called a I<profile sample>.
|
||||
|
||||
=item Data Filtering
|
||||
|
||||
If the method call was invoked by the DBI or by a driver then the call is
|
||||
ignored for profiling because the time spent will be accounted for by the
|
||||
original 'outermost' call for your code.
|
||||
|
||||
For example, the calls that the selectrow_arrayref() method makes
|
||||
to prepare() and execute() etc. are not counted individually
|
||||
because the time spent in those methods is going to be allocated
|
||||
to the selectrow_arrayref() method when it returns. If this was not
|
||||
done then it would be very easy to double count time spent inside
|
||||
the DBI.
|
||||
|
||||
=item Data Storage Tree
|
||||
|
||||
The profile data is accumulated as 'leaves on a tree'. The 'path' through the
|
||||
branches of the tree to a particular leaf is determined dynamically for each sample.
|
||||
This is a key feature of DBI profiling.
|
||||
|
||||
For each profiled method call the DBI walks along the Path and uses each value
|
||||
in the Path to step into and grow the Data tree.
|
||||
|
||||
For example, if the Path is
|
||||
|
||||
[ 'foo', 'bar', 'baz' ]
|
||||
|
||||
then the new profile sample data will be I<merged> into the tree at
|
||||
|
||||
$h->{Profile}->{Data}->{foo}->{bar}->{baz}
|
||||
|
||||
But it's not very useful to merge all the call data into one leaf node (except
|
||||
to get an overall 'time spent inside the DBI' total). It's more common to want
|
||||
the Path to include dynamic values such as the current statement text and/or
|
||||
the name of the method called to show what the time spent inside the DBI was for.
|
||||
|
||||
The Path can contain some 'magic cookie' values that are automatically replaced
|
||||
by corresponding dynamic values when they're used. These magic cookies always
|
||||
start with a punctuation character.
|
||||
|
||||
For example a value of 'C<!MethodName>' in the Path causes the corresponding
|
||||
entry in the Data to be the name of the method that was called.
|
||||
For example, if the Path was:
|
||||
|
||||
[ 'foo', '!MethodName', 'bar' ]
|
||||
|
||||
and the selectall_arrayref() method was called, then the profile sample data
|
||||
for that call will be merged into the tree at:
|
||||
|
||||
$h->{Profile}->{Data}->{foo}->{selectall_arrayref}->{bar}
|
||||
|
||||
=item Profile Data
|
||||
|
||||
Profile data is stored at the 'leaves' of the tree as references
|
||||
to an array of numeric values. For example:
|
||||
|
||||
[
|
||||
106, # 0: count of samples at this node
|
||||
0.0312958955764771, # 1: total duration
|
||||
0.000490069389343262, # 2: first duration
|
||||
0.000176072120666504, # 3: shortest duration
|
||||
0.00140702724456787, # 4: longest duration
|
||||
1023115819.83019, # 5: time of first sample
|
||||
1023115819.86576, # 6: time of last sample
|
||||
]
|
||||
|
||||
After the first sample, later samples always update elements 0, 1, and 6, and
|
||||
may update 3 or 4 depending on the duration of the sampled call.
|
||||
|
||||
=back
|
||||
|
||||
=head1 ENABLING A PROFILE
|
||||
|
||||
Profiling is enabled for a handle by assigning to the Profile
|
||||
attribute. For example:
|
||||
|
||||
$h->{Profile} = DBI::Profile->new();
|
||||
|
||||
The Profile attribute holds a blessed reference to a hash object
|
||||
that contains the profile data and attributes relating to it.
|
||||
|
||||
The class the Profile object is blessed into is expected to
|
||||
provide at least a DESTROY method which will dump the profile data
|
||||
to the DBI trace file handle (STDERR by default).
|
||||
|
||||
All these examples have the same effect as each other:
|
||||
|
||||
$h->{Profile} = 0;
|
||||
$h->{Profile} = "/DBI::Profile";
|
||||
$h->{Profile} = DBI::Profile->new();
|
||||
$h->{Profile} = {};
|
||||
$h->{Profile} = { Path => [] };
|
||||
|
||||
Similarly, these examples have the same effect as each other:
|
||||
|
||||
$h->{Profile} = 6;
|
||||
$h->{Profile} = "6/DBI::Profile";
|
||||
$h->{Profile} = "!Statement:!MethodName/DBI::Profile";
|
||||
$h->{Profile} = { Path => [ '!Statement', '!MethodName' ] };
|
||||
|
||||
If a non-blessed hash reference is given then the DBI::Profile
|
||||
module is automatically C<require>'d and the reference is blessed
|
||||
into that class.
|
||||
|
||||
If a string is given then it is processed like this:
|
||||
|
||||
($path, $module, $args) = split /\//, $string, 3
|
||||
|
||||
@path = split /:/, $path
|
||||
@args = split /:/, $args
|
||||
|
||||
eval "require $module" if $module
|
||||
$module ||= "DBI::Profile"
|
||||
|
||||
$module->new( Path => \@Path, @args )
|
||||
|
||||
So the first value is used to select the Path to be used (see below).
|
||||
The second value, if present, is used as the name of a module which
|
||||
will be loaded and its C<new> method called. If not present it
|
||||
defaults to DBI::Profile. Any other values are passed as arguments
|
||||
to the C<new> method. For example: "C<2/DBIx::OtherProfile/Foo:42>".
|
||||
|
||||
Numbers can be used as a shorthand way to enable common Path values.
|
||||
The simplest way to explain how the values are interpreted is to show the code:
|
||||
|
||||
push @Path, "DBI" if $path_elem & 0x01;
|
||||
push @Path, "!Statement" if $path_elem & 0x02;
|
||||
push @Path, "!MethodName" if $path_elem & 0x04;
|
||||
push @Path, "!MethodClass" if $path_elem & 0x08;
|
||||
push @Path, "!Caller2" if $path_elem & 0x10;
|
||||
|
||||
So "2" is the same as "!Statement" and "6" (2+4) is the same as
|
||||
"!Statement:!Method". Those are the two most commonly used values. Using a
|
||||
negative number will reverse the path. Thus "-6" will group by method name then
|
||||
statement.
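
For example, a sketch of both forms:

    $h->{Profile} = 6;    # Path is [ '!Statement', '!MethodName' ]
    $h->{Profile} = -6;   # Path is [ '!MethodName', '!Statement' ]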
|
||||
|
||||
The splitting and parsing of string values assigned to the Profile
|
||||
attribute may seem a little odd, but there's a good reason for it.
|
||||
Remember that attributes can be embedded in the Data Source Name
|
||||
string which can be passed in to a script as a parameter. For
|
||||
example:
|
||||
|
||||
dbi:DriverName(Profile=>2):dbname
|
||||
dbi:DriverName(Profile=>{Username}:!Statement/MyProfiler/Foo:42):dbname
|
||||
|
||||
And also, if the C<DBI_PROFILE> environment variable is set then
|
||||
the DBI arranges for every driver handle to share the same profile
|
||||
object. When perl exits a single profile summary will be generated
|
||||
that reflects (as nearly as practical) the total use of the DBI by
|
||||
the application.
|
||||
|
||||
|
||||
=head1 THE PROFILE OBJECT
|
||||
|
||||
The DBI core expects the Profile attribute value to be a hash
|
||||
reference and if the following values don't exist it will create
|
||||
them as needed:
|
||||
|
||||
=head2 Data
|
||||
|
||||
A reference to a hash containing the collected profile data.
|
||||
|
||||
=head2 Path
|
||||
|
||||
The Path value is a reference to an array. Each element controls the
|
||||
value to use at the corresponding level of the profile Data tree.
|
||||
|
||||
If the value of Path is anything other than an array reference,
|
||||
it is treated as if it was:
|
||||
|
||||
[ '!Statement' ]
|
||||
|
||||
The elements of Path array can be one of the following types:
|
||||
|
||||
=head3 Special Constant
|
||||
|
||||
B<!Statement>
|
||||
|
||||
Use the current Statement text. Typically that's the value of the Statement
|
||||
attribute for the handle the method was called with. Some methods, like
|
||||
commit() and rollback(), are unrelated to a particular statement. For those
|
||||
methods !Statement records an empty string.
|
||||
|
||||
For statement handles this is always simply the string that was
|
||||
given to prepare() when the handle was created. For database handles
|
||||
this is the statement that was last prepared or executed on that
|
||||
database handle. That can lead to a little 'fuzzyness' because, for
|
||||
example, calls to the quote() method to build a new statement will
|
||||
typically be associated with the previous statement. In practice
|
||||
this isn't a significant issue and the dynamic Path mechanism can
|
||||
be used to setup your own rules.
|
||||
|
||||
B<!MethodName>
|
||||
|
||||
Use the name of the DBI method that the profile sample relates to.
|
||||
|
||||
B<!MethodClass>
|
||||
|
||||
Use the fully qualified name of the DBI method, including
|
||||
the package, that the profile sample relates to. This shows you
|
||||
where the method was implemented. For example:
|
||||
|
||||
'DBD::_::db::selectrow_arrayref' =>
|
||||
0.022902s
|
||||
'DBD::mysql::db::selectrow_arrayref' =>
|
||||
2.244521s / 99 = 0.022445s avg (first 0.022813s, min 0.022051s, max 0.028932s)
|
||||
|
||||
The "DBD::_::db::selectrow_arrayref" shows that the driver has
|
||||
inherited the selectrow_arrayref method provided by the DBI.
|
||||
|
||||
But you'll note that there is only one call to
|
||||
DBD::_::db::selectrow_arrayref but another 99 to
|
||||
DBD::mysql::db::selectrow_arrayref. Currently the first
|
||||
call doesn't record the true location. That may change.
|
||||
|
||||
B<!Caller>
|
||||
|
||||
Use a string showing the filename and line number of the code calling the method.
|
||||
|
||||
B<!Caller2>
|
||||
|
||||
Use a string showing the filename and line number of the code calling the
|
||||
method, as for !Caller, but also include filename and line number of the code
|
||||
that called that. Calls from DBI:: and DBD:: packages are skipped.
|
||||
|
||||
B<!File>
|
||||
|
||||
Same as !Caller above except that only the filename is included, not the line number.
|
||||
|
||||
B<!File2>
|
||||
|
||||
Same as !Caller2 above except that only the filenames are included, not the line number.
|
||||
|
||||
B<!Time>
|
||||
|
||||
Use the current value of time(). Rarely used. See the more useful C<!Time~N> below.
|
||||
|
||||
B<!Time~N>
|
||||
|
||||
Where C<N> is an integer. Use the current value of time() but with reduced precision.
|
||||
The value used is determined in this way:
|
||||
|
||||
int( time() / N ) * N
|
||||
|
||||
This is a useful way to segregate a profile into time slots. For example:
|
||||
|
||||
[ '!Time~60', '!Statement' ]
|
||||
|
||||
=head3 Code Reference
|
||||
|
||||
The subroutine is passed the handle it was called on and the DBI method name.
|
||||
The current Statement is in $_. The statement string should not be modified,
|
||||
so most subs start with C<local $_ = $_;>.
|
||||
|
||||
The list of values it returns is used at that point in the Profile Path.
|
||||
Any undefined values are treated as the string "C<undef>".
|
||||
|
||||
The sub can 'veto' (reject) a profile sample by including a reference to undef
|
||||
(C<\undef>) in the returned list. That can be useful when you want to only profile
|
||||
statements that match a certain pattern, or only profile certain methods.
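
For example, here is a sketch of a code ref that only profiles SELECT statements and adds
the method name and statement text as extra levels in the Path (the pattern used is purely
illustrative):

    $h->{Profile} = { Path => [ sub {
        my ($h, $method) = @_;
        local $_ = $_;                         # don't modify the original statement
        return \undef unless /^\s*SELECT\b/i;  # veto samples for everything else
        return ($method, $_);                  # becomes {method}{statement} in Data
    } ] };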
|
||||
|
||||
=head3 Subroutine Specifier
|
||||
|
||||
A Path element that begins with 'C<&>' is treated as the name of a subroutine
|
||||
in the DBI::ProfileSubs namespace and replaced with the corresponding code reference.
|
||||
|
||||
Currently this only works when the Path is specified by the C<DBI_PROFILE>
|
||||
environment variable.
|
||||
|
||||
Also, currently, the only subroutine in the DBI::ProfileSubs namespace is
|
||||
C<'&norm_std_n3'>. That's a very handy subroutine when profiling code that
|
||||
doesn't use placeholders. See L<DBI::ProfileSubs> for more information.
|
||||
|
||||
=head3 Attribute Specifier
|
||||
|
||||
A string enclosed in braces, such as 'C<{Username}>', specifies that the current
|
||||
value of the corresponding database handle attribute should be used at that
|
||||
point in the Path.
|
||||
|
||||
=head3 Reference to a Scalar
|
||||
|
||||
Specifies that the current value of the referenced scalar be used at that point
|
||||
in the Path. This provides an efficient way to get 'contextual' values into
|
||||
your profile.
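
For example, a sketch that labels samples with the current phase of a program:

    our $phase = 'init';
    $h->{Profile} = { Path => [ \$phase, '!Statement' ] };
    # ... later ...
    $phase = 'main_loop';   # later samples are filed under {main_loop}{...}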
|
||||
|
||||
=head3 Other Values
|
||||
|
||||
Any other values are stringified and used literally.
|
||||
|
||||
(References, and values that begin with punctuation characters are reserved.)
|
||||
|
||||
|
||||
=head1 REPORTING
|
||||
|
||||
=head2 Report Format
|
||||
|
||||
The current accumulated profile data can be formatted and output using
|
||||
|
||||
print $h->{Profile}->format;
|
||||
|
||||
To discard the profile data and start collecting fresh data
|
||||
you can do:
|
||||
|
||||
$h->{Profile}->{Data} = undef;
|
||||
|
||||
|
||||
The default results format looks like this:
|
||||
|
||||
DBI::Profile: 0.001015s 42.7% (5 calls) programname @ YYYY-MM-DD HH:MM:SS
|
||||
'' =>
|
||||
0.000024s / 2 = 0.000012s avg (first 0.000015s, min 0.000009s, max 0.000015s)
|
||||
'SELECT mode,size,name FROM table' =>
|
||||
0.000991s / 3 = 0.000330s avg (first 0.000678s, min 0.000009s, max 0.000678s)
|
||||
|
||||
Which shows the total time spent inside the DBI, with a count of
|
||||
the total number of method calls and the name of the script being
|
||||
run, then a formatted version of the profile data tree.
|
||||
|
||||
If the results are being formatted when the perl process is exiting
|
||||
(which is usually the case when the DBI_PROFILE environment variable
|
||||
is used) then the percentage of time the process spent inside the
|
||||
DBI is also shown. If the process is not exiting then the percentage is
|
||||
calculated using the time between the first and last call to the DBI.
|
||||
|
||||
In the example above the paths in the tree are only one level deep and
|
||||
use the Statement text as the value (that's the default behaviour).
|
||||
|
||||
The merged profile data at the 'leaves' of the tree are presented
|
||||
as total time spent, count, average time spent (which is simply total
|
||||
time divided by the count), then the time spent on the first call,
|
||||
the time spent on the fastest call, and finally the time spent on
|
||||
the slowest call.
|
||||
|
||||
The 'avg', 'first', 'min' and 'max' times are not particularly
|
||||
useful when the profile data path only contains the statement text.
|
||||
Here's an extract of a more detailed example using both statement
|
||||
text and method name in the path:
|
||||
|
||||
'SELECT mode,size,name FROM table' =>
|
||||
'FETCH' =>
|
||||
0.000076s
|
||||
'fetchrow_hashref' =>
|
||||
0.036203s / 108 = 0.000335s avg (first 0.000490s, min 0.000152s, max 0.002786s)
|
||||
|
||||
Here you can see the 'avg', 'first', 'min' and 'max' for the
|
||||
108 calls to fetchrow_hashref() become rather more interesting.
|
||||
Also the data for FETCH just shows a time value because it was only
|
||||
called once.
|
||||
|
||||
Currently the profile data is output sorted by branch names. That
|
||||
may change in a later version so the leaf nodes are sorted by total
|
||||
time per leaf node.
|
||||
|
||||
|
||||
=head2 Report Destination
|
||||
|
||||
The default method of reporting is for the DESTROY method of the
|
||||
Profile object to format the results and write them using:
|
||||
|
||||
DBI->trace_msg($results, 0); # see $ON_DESTROY_DUMP below
|
||||
|
||||
to write them to the DBI trace() filehandle (which defaults to
|
||||
STDERR). To direct the DBI trace filehandle to write to a file
|
||||
without enabling tracing the trace() method can be called with a
|
||||
trace level of 0. For example:
|
||||
|
||||
DBI->trace(0, $filename);
|
||||
|
||||
The same effect can be achieved without changing the code by
|
||||
setting the C<DBI_TRACE> environment variable to C<0=filename>.
|
||||
|
||||
The $DBI::Profile::ON_DESTROY_DUMP variable holds a code ref
|
||||
that's called to perform the output of the formatted results.
|
||||
The default value is:
|
||||
|
||||
$ON_DESTROY_DUMP = sub { DBI->trace_msg($results, 0) };
|
||||
|
||||
Apart from making it easy to send the dump elsewhere, it can also
|
||||
be useful as a simple way to disable dumping results.
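
For example (the C<$logger> object here is hypothetical):

    $DBI::Profile::ON_DESTROY_DUMP = undef;   # discard the dump entirely

    # or redirect it, e.g. to an application logger:
    $DBI::Profile::ON_DESTROY_DUMP = sub { $logger->info(shift) };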
|
||||
|
||||
=head1 CHILD HANDLES
|
||||
|
||||
Child handles inherit a reference to the Profile attribute value
|
||||
of their parent. So if profiling is enabled for a database handle
|
||||
then by default the statement handles created from it all contribute
|
||||
to the same merged profile data tree.
|
||||
|
||||
|
||||
=head1 PROFILE OBJECT METHODS
|
||||
|
||||
=head2 format
|
||||
|
||||
See L</REPORTING>.
|
||||
|
||||
=head2 as_node_path_list
|
||||
|
||||
@ary = $dbh->{Profile}->as_node_path_list();
|
||||
@ary = $dbh->{Profile}->as_node_path_list($node, $path);
|
||||
|
||||
Returns the collected data ($dbh->{Profile}{Data}) restructured into a list of
|
||||
array refs, one for each leaf node in the Data tree. This 'flat' structure is
|
||||
often much simpler for applications to work with.
|
||||
|
||||
The first element of each array ref is a reference to the leaf node.
|
||||
The remaining elements are the 'path' through the data tree to that node.
|
||||
|
||||
For example, given a data tree like this:
|
||||
|
||||
{key1a}{key2a}[node1]
|
||||
{key1a}{key2b}[node2]
|
||||
{key1b}{key2a}{key3a}[node3]
|
||||
|
||||
The as_node_path_list() method will return this list:
|
||||
|
||||
[ [node1], 'key1a', 'key2a' ]
|
||||
[ [node2], 'key1a', 'key2b' ]
|
||||
[ [node3], 'key1b', 'key2a', 'key3a' ]
|
||||
|
||||
The nodes are ordered by key, depth-first.
|
||||
|
||||
The $node argument can be used to focus on a sub-tree.
|
||||
If not specified it defaults to $dbh->{Profile}{Data}.
|
||||
|
||||
The $path argument can be used to specify a list of path elements that will be
|
||||
added to each element of the returned list. If not specified it defaults to a
|
||||
ref to an empty array.
|
||||
|
||||
=head2 as_text
|
||||
|
||||
@txt = $dbh->{Profile}->as_text();
|
||||
$txt = $dbh->{Profile}->as_text({
|
||||
node => undef,
|
||||
path => [],
|
||||
separator => " > ",
|
||||
format => '%1$s: %11$fs / %10$d = %2$fs avg (first %12$fs, min %13$fs, max %14$fs)'."\n";
|
||||
sortsub => sub { ... },
|
||||
});
|
||||
|
||||
Returns the collected data ($dbh->{Profile}{Data}) reformatted into a list of formatted strings.
|
||||
In scalar context the list is returned as a single concatenated string.
|
||||
|
||||
A hashref can be used to pass in arguments, the default values are shown in the example above.
|
||||
|
||||
The C<node> and <path> arguments are passed to as_node_path_list().
|
||||
|
||||
The C<separator> argument is used to join the elements of the path for each leaf node.
|
||||
|
||||
The C<sortsub> argument is used to pass in a ref to a sub that will order the list.
|
||||
The subroutine will be passed a reference to the array returned by
|
||||
as_node_path_list() and should sort the contents of the array in place.
|
||||
The return value from the sub is ignored. For example, to sort the nodes by the
|
||||
second level key you could use:
|
||||
|
||||
sortsub => sub { my $ary=shift; @$ary = sort { $a->[2] cmp $b->[2] } @$ary }
|
||||
|
||||
The C<format> argument is a C<sprintf> format string that specifies the format
|
||||
to use for each leaf node. It uses the explicit format parameter index
|
||||
mechanism to specify which of the arguments should appear where in the string.
|
||||
The arguments to sprintf are:
|
||||
|
||||
1: path to node, joined with the separator
|
||||
2: average duration (total duration/count)
|
||||
(3 thru 9 are currently unused)
|
||||
10: count
|
||||
11: total duration
|
||||
12: first duration
|
||||
13: smallest duration
|
||||
14: largest duration
|
||||
15: time of first call
|
||||
16: time of last call
|
||||
|
||||
=head1 CUSTOM DATA MANIPULATION
|
||||
|
||||
Recall that C<< $h->{Profile}->{Data} >> is a reference to the collected data.
|
||||
Either to a 'leaf' array (when the Path is empty, i.e., DBI_PROFILE env var is 1),
|
||||
or a reference to a hash containing values that are either further hash
|
||||
references or leaf array references.
|
||||
|
||||
Sometimes it's useful to be able to summarise some or all of the collected data.
|
||||
The dbi_profile_merge_nodes() function can be used to merge leaf node values.
|
||||
|
||||
=head2 dbi_profile_merge_nodes
|
||||
|
||||
use DBI qw(dbi_profile_merge_nodes);
|
||||
|
||||
$time_in_dbi = dbi_profile_merge_nodes(my $totals=[], @$leaves);
|
||||
|
||||
Merges profile data node. Given a reference to a destination array, and zero or
|
||||
more references to profile data, merges the profile data into the destination array.
|
||||
For example:
|
||||
|
||||
$time_in_dbi = dbi_profile_merge_nodes(
|
||||
my $totals=[],
|
||||
[ 10, 0.51, 0.11, 0.01, 0.22, 1023110000, 1023110010 ],
|
||||
[ 15, 0.42, 0.12, 0.02, 0.23, 1023110005, 1023110009 ],
|
||||
);
|
||||
|
||||
$totals will then contain
|
||||
|
||||
[ 25, 0.93, 0.11, 0.01, 0.23, 1023110000, 1023110010 ]
|
||||
|
||||
and $time_in_dbi will be 0.93.
|
||||
|
||||
The second argument need not be just leaf nodes. If given a reference to a hash
|
||||
then the hash is recursively searched for leaf nodes and all those found
|
||||
are merged.
|
||||
|
||||
For example, to get the time spent 'inside' the DBI during an http request,
|
||||
your logging code run at the end of the request (i.e. mod_perl LogHandler)
|
||||
could use:
|
||||
|
||||
my $time_in_dbi = 0;
|
||||
if (my $Profile = $dbh->{Profile}) { # if DBI profiling is enabled
|
||||
$time_in_dbi = dbi_profile_merge_nodes(my $total=[], $Profile->{Data});
|
||||
$Profile->{Data} = {}; # reset the profile data
|
||||
}
|
||||
|
||||
If profiling has been enabled then $time_in_dbi will hold the time spent inside
|
||||
the DBI for that handle (and any other handles that share the same profile data)
|
||||
since the last request.
|
||||
|
||||
Prior to DBI 1.56 the dbi_profile_merge_nodes() function was called dbi_profile_merge().
|
||||
That name still exists as an alias.
|
||||
|
||||
=head1 CUSTOM DATA COLLECTION
|
||||
|
||||
=head2 Using The Path Attribute
|
||||
|
||||
XXX example to be added later using a selectall_arrayref call
|
||||
XXX nested inside a fetch loop where the first column of the
|
||||
XXX outer loop is bound to the profile Path using
|
||||
XXX bind_column(1, \${ $dbh->{Profile}->{Path}->[0] })
|
||||
XXX so you end up with separate profiles for each loop
|
||||
XXX (patches welcome to add this to the docs :)
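
Pending that, here is a rough sketch of the idea (the table and column names are
invented): the first Path element is a reference to a scalar that is bound to the first
column of the outer statement, so samples from the inner queries are filed under
separate branches of the profile Data tree.

    my ($region, $id);
    $dbh->{Profile} = { Path => [ \$region, '!Statement' ] };

    my $sth = $dbh->prepare("SELECT region, customer_id FROM customers");
    $sth->execute;
    $sth->bind_columns(\$region, \$id);   # keeps $region in step with each fetched row

    while ($sth->fetch) {
        # these samples are merged under $dbh->{Profile}{Data}{$region}{...}
        $dbh->selectall_arrayref(
            "SELECT * FROM orders WHERE customer_id = ?", undef, $id);
    }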
|
||||
|
||||
=head2 Adding Your Own Samples
|
||||
|
||||
The dbi_profile() function can be used to add extra sample data
|
||||
into the profile data tree. For example:
|
||||
|
||||
use DBI;
|
||||
use DBI::Profile qw(dbi_profile dbi_time);
|
||||
|
||||
my $t1 = dbi_time(); # floating point high-resolution time
|
||||
|
||||
... execute code you want to profile here ...
|
||||
|
||||
my $t2 = dbi_time();
|
||||
dbi_profile($h, $statement, $method, $t1, $t2);
|
||||
|
||||
The $h parameter is the handle the extra profile sample should be
|
||||
associated with. The $statement parameter is the string to use where
|
||||
the Path specifies !Statement. If $statement is undef
|
||||
then $h->{Statement} will be used. Similarly $method is the string
|
||||
to use if the Path specifies !MethodName. There is no
|
||||
default value for $method.
|
||||
|
||||
The $h->{Profile}{Path} attribute is processed by dbi_profile() in
|
||||
the usual way.
|
||||
|
||||
The $h parameter is usually a DBI handle but it can also be a reference to a
|
||||
hash, in which case the dbi_profile() acts on each defined value in the hash.
|
||||
This is an efficient way to update multiple profiles with a single sample,
|
||||
and is used by the L<DashProfiler> module.
|
||||
|
||||
=head1 SUBCLASSING
|
||||
|
||||
Alternate profile modules must subclass DBI::Profile to help ensure
|
||||
they work with future versions of the DBI.
|
||||
|
||||
|
||||
=head1 CAVEATS
|
||||
|
||||
Applications which generate many different statement strings
|
||||
(typically because they don't use placeholders) and profile with
|
||||
!Statement in the Path (the default) will consume memory
|
||||
in the Profile Data structure for each statement. Use a code ref
|
||||
in the Path to return an edited (simplified) form of the statement.
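
For example, a sketch of a code ref that folds literal values so that similar statements
share a single leaf node (the substitutions are deliberately simplistic; the
C<&norm_std_n3> subroutine in L<DBI::ProfileSubs> does a more thorough job of this):

    $h->{Profile} = { Path => [ sub {
        local $_ = $_;        # edit a copy of the statement text
        s/\b\d+\b/<N>/g;      # fold numeric literals
        s/'[^']*'/<S>/g;      # fold single-quoted strings
        return $_;
    } ] };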
|
||||
|
||||
If a method throws an exception itself (not via RaiseError) then
|
||||
it won't be counted in the profile.
|
||||
|
||||
If a HandleError subroutine throws an exception (rather than returning
|
||||
0 and letting RaiseError do it) then the method call won't be counted
|
||||
in the profile.
|
||||
|
||||
Time spent in DESTROY is added to the profile of the parent handle.
|
||||
|
||||
Time spent in DBI->*() methods is not counted. The time spent in
|
||||
the driver connect method, $drh->connect(), when it's called by
|
||||
DBI->connect is counted if the DBI_PROFILE environment variable is set.
|
||||
|
||||
Time spent fetching tied variables, $DBI::errstr, is counted.
|
||||
|
||||
Time spent in FETCH for $h->{Profile} is not counted, so getting the profile
|
||||
data doesn't alter it.
|
||||
|
||||
DBI::PurePerl does not support profiling (though it could in theory).
|
||||
|
||||
For asynchronous queries, time spent while the query is running on the
|
||||
backend is not counted.
|
||||
|
||||
A few platforms don't support the gettimeofday() high resolution
|
||||
time function used by the DBI (and available via the dbi_time() function).
|
||||
In that case you'll get integer resolution time, which is mostly useless.
|
||||
|
||||
On Windows platforms the dbi_time() function is limited to millisecond
|
||||
resolution, which isn't sufficiently fine for our needs but is still
|
||||
much better than integer resolution. This limited resolution means
|
||||
that fast method calls will often register as taking 0 time. And
|
||||
timings in general will have much more 'jitter' depending on where
|
||||
within the 'current millisecond' the start and end timing was taken.
|
||||
|
||||
This documentation could be more clear. Probably needs to be reordered
|
||||
to start with several examples and build from there. Trying to
|
||||
explain the concepts first seems painful and to lead to just as
|
||||
many forward references. (Patches welcome!)
|
||||
|
||||
=cut
|
||||
|
||||
|
||||
use strict;
|
||||
use vars qw(@ISA @EXPORT @EXPORT_OK $VERSION);
|
||||
use Exporter ();
|
||||
use UNIVERSAL ();
|
||||
use Carp;
|
||||
|
||||
use DBI qw(dbi_time dbi_profile dbi_profile_merge_nodes dbi_profile_merge);
|
||||
|
||||
$VERSION = "2.015065";
|
||||
|
||||
@ISA = qw(Exporter);
|
||||
@EXPORT = qw(
|
||||
DBIprofile_Statement
|
||||
DBIprofile_MethodName
|
||||
DBIprofile_MethodClass
|
||||
dbi_profile
|
||||
dbi_profile_merge_nodes
|
||||
dbi_profile_merge
|
||||
dbi_time
|
||||
);
|
||||
@EXPORT_OK = qw(
|
||||
format_profile_thingy
|
||||
);
|
||||
|
||||
use constant DBIprofile_Statement => '!Statement';
|
||||
use constant DBIprofile_MethodName => '!MethodName';
|
||||
use constant DBIprofile_MethodClass => '!MethodClass';
|
||||
|
||||
our $ON_DESTROY_DUMP = sub { DBI->trace_msg(shift, 0) };
|
||||
our $ON_FLUSH_DUMP = sub { DBI->trace_msg(shift, 0) };
|
||||
|
||||
sub new {
|
||||
my $class = shift;
|
||||
my $profile = { @_ };
|
||||
return bless $profile => $class;
|
||||
}
|
||||
|
||||
|
||||
sub _auto_new {
|
||||
my $class = shift;
|
||||
my ($arg) = @_;
|
||||
|
||||
# This sub is called by DBI internals when a non-hash-ref is
|
||||
# assigned to the Profile attribute. For example
|
||||
# dbi:mysql(RaiseError=>1,Profile=>!Statement:!MethodName/DBIx::MyProfile/arg1:arg2):dbname
|
||||
# This sub works out what to do and returns a suitable hash ref.
|
||||
|
||||
$arg =~ s/^DBI::/2\/DBI::/
|
||||
and carp "Automatically changed old-style DBI::Profile specification to $arg";
|
||||
|
||||
# it's a path/module/k1:v1:k2:v2:... list
|
||||
my ($path, $package, $args) = split /\//, $arg, 3;
|
||||
my @args = (defined $args) ? split(/:/, $args, -1) : ();
|
||||
my @Path;
|
||||
|
||||
for my $element (split /:/, $path) {
|
||||
if (DBI::looks_like_number($element)) {
|
||||
my $reverse = ($element < 0) ? ($element=-$element, 1) : 0;
|
||||
my @p;
|
||||
# a single "DBI" is special-cased in format()
|
||||
push @p, "DBI" if $element & 0x01;
|
||||
push @p, DBIprofile_Statement if $element & 0x02;
|
||||
push @p, DBIprofile_MethodName if $element & 0x04;
|
||||
push @p, DBIprofile_MethodClass if $element & 0x08;
|
||||
push @p, '!Caller2' if $element & 0x10;
|
||||
push @Path, ($reverse ? reverse @p : @p);
|
||||
}
|
||||
elsif ($element =~ m/^&(\w.*)/) {
|
||||
my $name = "DBI::ProfileSubs::$1"; # capture $1 early
|
||||
require DBI::ProfileSubs;
|
||||
my $code = do { no strict; *{$name}{CODE} };
|
||||
if (defined $code) {
|
||||
push @Path, $code;
|
||||
}
|
||||
else {
|
||||
warn "$name: subroutine not found\n";
|
||||
push @Path, $element;
|
||||
}
|
||||
}
|
||||
else {
|
||||
push @Path, $element;
|
||||
}
|
||||
}
|
||||
|
||||
eval "require $package" if $package; # silently ignores errors
|
||||
$package ||= $class;
|
||||
|
||||
return $package->new(Path => \@Path, @args);
|
||||
}
|
||||
|
||||
|
||||
sub empty { # empty out profile data
|
||||
my $self = shift;
|
||||
DBI->trace_msg("profile data discarded\n",0) if $self->{Trace};
|
||||
$self->{Data} = undef;
|
||||
}
|
||||
|
||||
sub filename { # baseclass method, see DBI::ProfileDumper
|
||||
return undef;
|
||||
}
|
||||
|
||||
sub flush_to_disk { # baseclass method, see DBI::ProfileDumper & DashProfiler::Core
|
||||
my $self = shift;
|
||||
return unless $ON_FLUSH_DUMP;
|
||||
return unless $self->{Data};
|
||||
my $detail = $self->format();
|
||||
$ON_FLUSH_DUMP->($detail) if $detail;
|
||||
}
|
||||
|
||||
|
||||
sub as_node_path_list {
|
||||
my ($self, $node, $path) = @_;
|
||||
# convert the tree into an array of arrays
|
||||
# from
|
||||
# {key1a}{key2a}[node1]
|
||||
# {key1a}{key2b}[node2]
|
||||
# {key1b}{key2a}{key3a}[node3]
|
||||
# to
|
||||
# [ [node1], 'key1a', 'key2a' ]
|
||||
# [ [node2], 'key1a', 'key2b' ]
|
||||
# [ [node3], 'key1b', 'key2a', 'key3a' ]
|
||||
|
||||
$node ||= $self->{Data} or return;
|
||||
$path ||= [];
|
||||
if (ref $node eq 'HASH') { # recurse
|
||||
$path = [ @$path, undef ];
|
||||
return map {
|
||||
$path->[-1] = $_;
|
||||
($node->{$_}) ? $self->as_node_path_list($node->{$_}, $path) : ()
|
||||
} sort keys %$node;
|
||||
}
|
||||
return [ $node, @$path ];
|
||||
}
|
||||
|
||||
|
||||
sub as_text {
|
||||
my ($self, $args_ref) = @_;
|
||||
my $separator = $args_ref->{separator} || " > ";
|
||||
my $format_path_element = $args_ref->{format_path_element}
|
||||
|| "%s"; # or e.g., " key%2$d='%s'"
|
||||
my $format = $args_ref->{format}
|
||||
|| '%1$s: %11$fs / %10$d = %2$fs avg (first %12$fs, min %13$fs, max %14$fs)'."\n";
|
||||
|
||||
my @node_path_list = $self->as_node_path_list(undef, $args_ref->{path});
|
||||
|
||||
$args_ref->{sortsub}->(\@node_path_list) if $args_ref->{sortsub};
|
||||
|
||||
my $eval = "qr/".quotemeta($separator)."/";
|
||||
my $separator_re = eval($eval) || quotemeta($separator);
|
||||
#warn "[$eval] = [$separator_re]";
|
||||
my @text;
|
||||
my @spare_slots = (undef) x 7;
|
||||
for my $node_path (@node_path_list) {
|
||||
my ($node, @path) = @$node_path;
|
||||
my $idx = 0;
|
||||
for (@path) {
|
||||
s/[\r\n]+/ /g;
|
||||
s/$separator_re/ /g;
|
||||
++$idx;
|
||||
if ($format_path_element eq "%s") {
|
||||
$_ = sprintf $format_path_element, $_;
|
||||
} else {
|
||||
$_ = sprintf $format_path_element, $_, $idx;
|
||||
}
|
||||
}
|
||||
push @text, sprintf $format,
|
||||
join($separator, @path), # 1=path
|
||||
($node->[0] ? $node->[1]/$node->[0] : 0), # 2=avg
|
||||
@spare_slots,
|
||||
@$node; # 10=count, 11=dur, 12=first_dur, 13=min, 14=max, 15=first_called, 16=last_called
|
||||
}
|
||||
return @text if wantarray;
|
||||
return join "", @text;
|
||||
}
|
||||
|
||||
|
||||
sub format {
|
||||
my $self = shift;
|
||||
my $class = ref($self) || $self;
|
||||
|
||||
my $prologue = "$class: ";
|
||||
my $detail = $self->format_profile_thingy(
|
||||
$self->{Data}, 0, " ",
|
||||
my $path = [],
|
||||
my $leaves = [],
|
||||
)."\n";
|
||||
|
||||
if (@$leaves) {
|
||||
dbi_profile_merge_nodes(my $totals=[], @$leaves);
|
||||
my ($count, $time_in_dbi, undef, undef, undef, $t1, $t2) = @$totals;
|
||||
(my $progname = $0) =~ s:.*/::;
|
||||
if ($count) {
|
||||
$prologue .= sprintf "%fs ", $time_in_dbi;
|
||||
my $perl_time = ($DBI::PERL_ENDING) ? time() - $^T : $t2-$t1;
|
||||
$prologue .= sprintf "%.2f%% ", $time_in_dbi/$perl_time*100 if $perl_time;
|
||||
my @lt = localtime(time);
|
||||
my $ts = sprintf "%d-%02d-%02d %02d:%02d:%02d",
|
||||
1900+$lt[5], $lt[4]+1, @lt[3,2,1,0];
|
||||
$prologue .= sprintf "(%d calls) $progname \@ $ts\n", $count;
|
||||
}
|
||||
if (@$leaves == 1 && ref($self->{Data}) eq 'HASH' && $self->{Data}->{DBI}) {
|
||||
$detail = ""; # hide the "DBI" from DBI_PROFILE=1
|
||||
}
|
||||
}
|
||||
return ($prologue, $detail) if wantarray;
|
||||
return $prologue.$detail;
|
||||
}
|
||||
|
||||
|
||||
sub format_profile_leaf {
|
||||
my ($self, $thingy, $depth, $pad, $path, $leaves) = @_;
|
||||
croak "format_profile_leaf called on non-leaf ($thingy)"
|
||||
unless UNIVERSAL::isa($thingy,'ARRAY');
|
||||
|
||||
push @$leaves, $thingy if $leaves;
|
||||
my ($count, $total_time, $first_time, $min, $max, $first_called, $last_called) = @$thingy;
|
||||
return sprintf "%s%fs\n", ($pad x $depth), $total_time
|
||||
if $count <= 1;
|
||||
return sprintf "%s%fs / %d = %fs avg (first %fs, min %fs, max %fs)\n",
|
||||
($pad x $depth), $total_time, $count, $count ? $total_time/$count : 0,
|
||||
$first_time, $min, $max;
|
||||
}
|
||||
|
||||
|
||||
sub format_profile_branch {
|
||||
my ($self, $thingy, $depth, $pad, $path, $leaves) = @_;
|
||||
croak "format_profile_branch called on non-branch ($thingy)"
|
||||
unless UNIVERSAL::isa($thingy,'HASH');
|
||||
my @chunk;
|
||||
my @keys = sort keys %$thingy;
|
||||
while ( @keys ) {
|
||||
my $k = shift @keys;
|
||||
my $v = $thingy->{$k};
|
||||
push @$path, $k;
|
||||
push @chunk, sprintf "%s'%s' =>\n%s",
|
||||
($pad x $depth), $k,
|
||||
$self->format_profile_thingy($v, $depth+1, $pad, $path, $leaves);
|
||||
pop @$path;
|
||||
}
|
||||
return join "", @chunk;
|
||||
}
|
||||
|
||||
|
||||
sub format_profile_thingy {
|
||||
my ($self, $thingy, $depth, $pad, $path, $leaves) = @_;
|
||||
return "undef" if not defined $thingy;
|
||||
return $self->format_profile_leaf( $thingy, $depth, $pad, $path, $leaves)
|
||||
if UNIVERSAL::isa($thingy,'ARRAY');
|
||||
return $self->format_profile_branch($thingy, $depth, $pad, $path, $leaves)
|
||||
if UNIVERSAL::isa($thingy,'HASH');
|
||||
return "$thingy\n";
|
||||
}
|
||||
|
||||
|
||||
sub on_destroy {
|
||||
my $self = shift;
|
||||
return unless $ON_DESTROY_DUMP;
|
||||
return unless $self->{Data};
|
||||
my $detail = $self->format();
|
||||
$ON_DESTROY_DUMP->($detail) if $detail;
|
||||
$self->{Data} = undef;
|
||||
}
|
||||
|
||||
sub DESTROY {
|
||||
my $self = shift;
|
||||
local $@;
|
||||
DBI->trace_msg("profile data DESTROY\n",0)
|
||||
if (($self->{Trace}||0) >= 2);
|
||||
eval { $self->on_destroy };
|
||||
if ($@) {
|
||||
chomp $@;
|
||||
my $class = ref($self) || $self;
|
||||
DBI->trace_msg("$class on_destroy failed: $@", 0);
|
||||
}
|
||||
}
|
||||
|
||||
1;
|
||||
|
|
@ -0,0 +1,736 @@
|
|||
package DBI::ProfileData;
|
||||
use strict;
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBI::ProfileData - manipulate DBI::ProfileDumper data dumps
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
The easiest way to use this module is through the dbiprof frontend
|
||||
(see L<dbiprof> for details):
|
||||
|
||||
dbiprof --number 15 --sort count
|
||||
|
||||
This module can also be used to roll your own profile analysis:
|
||||
|
||||
# load data from dbi.prof
|
||||
$prof = DBI::ProfileData->new(File => "dbi.prof");
|
||||
|
||||
# get a count of the records (unique paths) in the data set
|
||||
$count = $prof->count();
|
||||
|
||||
# sort by longest overall time
|
||||
$prof->sort(field => "longest");
|
||||
|
||||
# sort by longest overall time, least to greatest
|
||||
$prof->sort(field => "longest", reverse => 1);
|
||||
|
||||
# exclude records with key2 eq 'disconnect'
|
||||
$prof->exclude(key2 => 'disconnect');
|
||||
|
||||
# exclude records with key1 matching /^UPDATE/i
|
||||
$prof->exclude(key1 => qr/^UPDATE/i);
|
||||
|
||||
# remove all records except those where key1 matches /^SELECT/i
|
||||
$prof->match(key1 => qr/^SELECT/i);
|
||||
|
||||
# produce a formatted report with the given number of items
|
||||
$report = $prof->report(number => 10);
|
||||
|
||||
# clone the profile data set
|
||||
$clone = $prof->clone();
|
||||
|
||||
# get access to hash of header values
|
||||
$header = $prof->header();
|
||||
|
||||
# get access to sorted array of nodes
|
||||
$nodes = $prof->nodes();
|
||||
|
||||
# format a single node in the same style as report()
|
||||
$text = $prof->format($nodes->[0]);
|
||||
|
||||
# get access to Data hash in DBI::Profile format
|
||||
$Data = $prof->Data();
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
This module offers the ability to read, manipulate and format
|
||||
L<DBI::ProfileDumper> profile data.
|
||||
|
||||
Conceptually, a profile consists of a series of records, or nodes,
|
||||
each of which has a set of statistics and a set of keys. Each record
|
||||
must have a unique set of keys, but there is no requirement that every
|
||||
record have the same number of keys.
|
||||
|
||||
=head1 METHODS
|
||||
|
||||
The following methods are supported by DBI::ProfileData objects.
|
||||
|
||||
=cut
|
||||
|
||||
our $VERSION = "2.010008";
|
||||
|
||||
use Carp qw(croak);
|
||||
use Symbol;
|
||||
use Fcntl qw(:flock);
|
||||
|
||||
use DBI::Profile qw(dbi_profile_merge);
|
||||
|
||||
# some constants for use with node data arrays
|
||||
sub COUNT () { 0 };
|
||||
sub TOTAL () { 1 };
|
||||
sub FIRST () { 2 };
|
||||
sub SHORTEST () { 3 };
|
||||
sub LONGEST () { 4 };
|
||||
sub FIRST_AT () { 5 };
|
||||
sub LAST_AT () { 6 };
|
||||
sub PATH () { 7 };
|
||||
|
||||
|
||||
my $HAS_FLOCK = (defined $ENV{DBI_PROFILE_FLOCK})
|
||||
? $ENV{DBI_PROFILE_FLOCK}
|
||||
: do { local $@; eval { flock STDOUT, 0; 1 } };
|
||||
|
||||
|
||||
=head2 $prof = DBI::ProfileData->new(File => "dbi.prof")
|
||||
|
||||
=head2 $prof = DBI::ProfileData->new(File => "dbi.prof", Filter => sub { ... })
|
||||
|
||||
=head2 $prof = DBI::ProfileData->new(Files => [ "dbi.prof.1", "dbi.prof.2" ])
|
||||
|
||||
Creates a new DBI::ProfileData object. Takes either a single file
|
||||
through the File option or a list of Files in an array ref. If
|
||||
multiple files are specified then the header data from the first file
|
||||
is used.
|
||||
|
||||
=head3 Files
|
||||
|
||||
Reference to an array of file names to read.
|
||||
|
||||
=head3 File
|
||||
|
||||
Name of file to read. Takes precedence over C<Files>.
|
||||
|
||||
=head3 DeleteFiles
|
||||
|
||||
If true, the files are deleted after being read.
|
||||
|
||||
Actually the files are renamed with a C<deleteme> suffix before being read,
|
||||
and then, after reading all the files, they're all deleted together.
|
||||
|
||||
The files are locked while being read which, combined with the rename, makes it
|
||||
safe to 'consume' files that are still being generated by L<DBI::ProfileDumper>.
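
For example, a minimal sketch of consuming and removing all accumulated
dump files in one pass (the C<glob> pattern is illustrative):

  my $prof = DBI::ProfileData->new(
      Files       => [ glob("dbi.prof.*") ],
      DeleteFiles => 1,
  );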
|
||||
|
||||
=head3 Filter
|
||||
|
||||
The C<Filter> parameter can be used to supply a code reference that can
|
||||
manipulate the profile data as it is being read. This is most useful for
|
||||
editing SQL statements so that slightly different statements in the raw data
|
||||
will be merged and aggregated in the loaded data. For example:
|
||||
|
||||
Filter => sub {
|
||||
my ($path_ref, $data_ref) = @_;
|
||||
s/foo = '.*?'/foo = '...'/ for @$path_ref;
|
||||
}
|
||||
|
||||
Here's an example that performs some normalization on the SQL. It converts all
|
||||
numbers to C<N> and all quoted strings to C<S>. It can also convert digits to
|
||||
N within names. Finally, it summarizes long "IN (...)" clauses.
|
||||
|
||||
It's aggressive and simplistic, but it's often sufficient, and serves as an
|
||||
example that you can tailor to suit your own needs:
|
||||
|
||||
Filter => sub {
|
||||
my ($path_ref, $data_ref) = @_;
|
||||
local $_ = $path_ref->[0]; # whichever element contains the SQL Statement
|
||||
s/\b\d+\b/N/g; # 42 -> N
|
||||
s/\b0x[0-9A-Fa-f]+\b/N/g; # 0xFE -> N
|
||||
s/'.*?'/'S'/g; # single quoted strings (doesn't handle escapes)
|
||||
s/".*?"/"S"/g; # double quoted strings (doesn't handle escapes)
|
||||
# convert names like log_20001231 into log_NNNNNNNN, controlled by $opt{n}
|
||||
s/([a-z_]+)(\d{$opt{n},})/$1.('N' x length($2))/ieg if $opt{n};
|
||||
# abbreviate massive "in (...)" statements and similar
|
||||
s!(([NS],){100,})!sprintf("$2,{repeated %d times}",length($1)/2)!eg;
|
||||
}
|
||||
|
||||
It's often better to perform these kinds of normalization in the DBI while the
|
||||
data is being collected, to avoid too much memory being used by storing profile
|
||||
data for many different SQL statements. See L<DBI::Profile>.
|
||||
|
||||
=cut
|
||||
|
||||
sub new {
|
||||
my $pkg = shift;
|
||||
my $self = {
|
||||
Files => [ "dbi.prof" ],
|
||||
Filter => undef,
|
||||
DeleteFiles => 0,
|
||||
LockFile => $HAS_FLOCK,
|
||||
_header => {},
|
||||
_nodes => [],
|
||||
_node_lookup => {},
|
||||
_sort => 'none',
|
||||
@_
|
||||
};
|
||||
bless $self, $pkg;
|
||||
|
||||
# File (singular) overrides Files (plural)
|
||||
$self->{Files} = [ $self->{File} ] if exists $self->{File};
|
||||
|
||||
$self->_read_files();
|
||||
return $self;
|
||||
}
|
||||
|
||||
# read files into _header and _nodes
|
||||
sub _read_files {
|
||||
my $self = shift;
|
||||
my $files = $self->{Files};
|
||||
my $read_header = 0;
|
||||
my @files_to_delete;
|
||||
|
||||
my $fh = gensym;
|
||||
foreach (@$files) {
|
||||
my $filename = $_;
|
||||
|
||||
if ($self->{DeleteFiles}) {
|
||||
my $newfilename = $filename . ".deleteme";
|
||||
if ($^O eq 'VMS') {
|
||||
# VMS default filesystem can only have one period
|
||||
$newfilename = $filename . 'deleteme';
|
||||
}
|
||||
# will clobber an existing $newfilename
|
||||
rename($filename, $newfilename)
|
||||
or croak "Can't rename($filename, $newfilename): $!";
|
||||
# On a versioned filesystem we want old versions to be removed
|
||||
1 while (unlink $filename);
|
||||
$filename = $newfilename;
|
||||
}
|
||||
|
||||
open($fh, "<", $filename)
|
||||
or croak("Unable to read profile file '$filename': $!");
|
||||
|
||||
# lock the file in case it's still being written to
|
||||
# (we'll be forced to wait till the write is complete)
|
||||
flock($fh, LOCK_SH) if $self->{LockFile};
|
||||
|
||||
if (-s $fh) { # not empty
|
||||
$self->_read_header($fh, $filename, $read_header ? 0 : 1);
|
||||
$read_header = 1;
|
||||
$self->_read_body($fh, $filename);
|
||||
}
|
||||
close($fh); # and release lock
|
||||
|
||||
push @files_to_delete, $filename
|
||||
if $self->{DeleteFiles};
|
||||
}
|
||||
for (@files_to_delete){
|
||||
# for versioned file systems
|
||||
1 while (unlink $_);
|
||||
if(-e $_){
|
||||
warn "Can't delete '$_': $!";
|
||||
}
|
||||
}
|
||||
|
||||
# discard node_lookup now that all files are read
|
||||
delete $self->{_node_lookup};
|
||||
}
|
||||
|
||||
# read the header from the given $fh named $filename. Discards the
|
||||
# data unless $keep.
|
||||
sub _read_header {
|
||||
my ($self, $fh, $filename, $keep) = @_;
|
||||
|
||||
# get profiler module id
|
||||
my $first = <$fh>;
|
||||
chomp $first;
|
||||
$self->{_profiler} = $first if $keep;
|
||||
|
||||
# collect variables from the header
|
||||
local $_;
|
||||
while (<$fh>) {
|
||||
chomp;
|
||||
last unless length $_;
|
||||
/^(\S+)\s*=\s*(.*)/
|
||||
or croak("Syntax error in header in $filename line $.: $_");
|
||||
# XXX should compare new with existing (from previous file)
|
||||
# and warn if they differ (different program or path)
|
||||
$self->{_header}{$1} = unescape_key($2) if $keep;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
sub unescape_key { # inverse of escape_key() in DBI::ProfileDumper
|
||||
local $_ = shift;
|
||||
s/(?<!\\)\\n/\n/g; # expand \n, unless it's a \\n
|
||||
s/(?<!\\)\\r/\r/g; # expand \r, unless it's a \\r
|
||||
s/\\\\/\\/g; # \\ to \
|
||||
return $_;
|
||||
}
|
||||
|
||||
|
||||
# reads the body of the profile data
|
||||
sub _read_body {
|
||||
my ($self, $fh, $filename) = @_;
|
||||
my $nodes = $self->{_nodes};
|
||||
my $lookup = $self->{_node_lookup};
|
||||
my $filter = $self->{Filter};
|
||||
|
||||
# build up node array
|
||||
my @path = ("");
|
||||
my (@data, $path_key);
|
||||
local $_;
|
||||
while (<$fh>) {
|
||||
chomp;
|
||||
if (/^\+\s+(\d+)\s?(.*)/) {
|
||||
# it's a key
|
||||
my ($key, $index) = ($2, $1 - 1);
|
||||
|
||||
$#path = $index; # truncate path to new length
|
||||
$path[$index] = unescape_key($key); # place new key at end
|
||||
|
||||
}
|
||||
elsif (s/^=\s+//) {
|
||||
# it's data - fill in the node array with the path in index 0
|
||||
# (the optional minus is to make it more robust against systems
|
||||
# with unstable high-res clocks - typically due to poor NTP config
|
||||
# or kernel SMP behaviour, i.e. min time may be -0.000008)
|
||||
|
||||
@data = split / /, $_;
|
||||
|
||||
# corrupt data?
|
||||
croak("Invalid number of fields in $filename line $.: $_")
|
||||
unless @data == 7;
|
||||
croak("Invalid leaf node characters $filename line $.: $_")
|
||||
unless m/^[-+ 0-9eE\.]+$/;
|
||||
|
||||
# hook to enable pre-processing of the data - such as mangling SQL
|
||||
# so that slightly different statements get treated as the same
|
||||
# and so merged in the results
|
||||
$filter->(\@path, \@data) if $filter;
|
||||
|
||||
# elements of @path can't have NULLs in them, so this
|
||||
# forms a unique string per @path. If there's some way I
|
||||
# can get this without arbitrarily stripping out a
|
||||
# character I'd be happy to hear it!
|
||||
$path_key = join("\0",@path);
|
||||
|
||||
# look for previous entry
|
||||
if (exists $lookup->{$path_key}) {
|
||||
# merge in the new data
|
||||
dbi_profile_merge($nodes->[$lookup->{$path_key}], \@data);
|
||||
} else {
|
||||
# insert a new node - nodes are arrays with data in 0-6
|
||||
# and path data after that
|
||||
push(@$nodes, [ @data, @path ]);
|
||||
|
||||
# record node in %seen
|
||||
$lookup->{$path_key} = $#$nodes;
|
||||
}
|
||||
}
|
||||
else {
|
||||
croak("Invalid line type syntax error in $filename line $.: $_");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
=head2 $copy = $prof->clone();
|
||||
|
||||
Clone a profile data set creating a new object.
|
||||
|
||||
=cut
|
||||
|
||||
sub clone {
|
||||
my $self = shift;
|
||||
|
||||
# start with a simple copy
|
||||
my $clone = bless { %$self }, ref($self);
|
||||
|
||||
# deep copy nodes
|
||||
$clone->{_nodes} = [ map { [ @$_ ] } @{$self->{_nodes}} ];
|
||||
|
||||
# deep copy header
|
||||
$clone->{_header} = { %{$self->{_header}} };
|
||||
|
||||
return $clone;
|
||||
}
|
||||
|
||||
=head2 $header = $prof->header();
|
||||
|
||||
Returns a reference to a hash of header values. These are the key
|
||||
value pairs included in the header section of the L<DBI::ProfileDumper>
|
||||
data format. For example:
|
||||
|
||||
$header = {
|
||||
Path => [ '!Statement', '!MethodName' ],
|
||||
Program => 't/42profile_data.t',
|
||||
};
|
||||
|
||||
Note that modifying this hash will modify the header data stored
|
||||
inside the profile object.
|
||||
|
||||
=cut
|
||||
|
||||
sub header { shift->{_header} }
|
||||
|
||||
|
||||
=head2 $nodes = $prof->nodes()
|
||||
|
||||
Returns a reference to the sorted nodes array. Each element in the array
|
||||
is a single record in the data set. The first seven elements are the
|
||||
same as the elements provided by L<DBI::Profile>. After that each key is
|
||||
in a separate element. For example:
|
||||
|
||||
$nodes = [
|
||||
[
|
||||
2, # 0, count
|
||||
0.0312958955764771, # 1, total duration
|
||||
0.000490069389343262, # 2, first duration
|
||||
0.000176072120666504, # 3, shortest duration
|
||||
0.00140702724456787, # 4, longest duration
|
||||
1023115819.83019, # 5, time of first event
|
||||
1023115819.86576, # 6, time of last event
|
||||
'SELECT foo FROM bar', # 7, key1
|
||||
'execute' # 8, key2
|
||||
# 6+N, keyN
|
||||
],
|
||||
# ...
|
||||
];
|
||||
|
||||
Note that modifying this array will modify the node data stored inside
|
||||
the profile object.
|
||||
|
||||
=cut
|
||||
|
||||
sub nodes { shift->{_nodes} }
|
||||
|
||||
|
||||
=head2 $count = $prof->count()
|
||||
|
||||
Returns the number of items in the profile data set.
|
||||
|
||||
=cut
|
||||
|
||||
sub count { scalar @{shift->{_nodes}} }
|
||||
|
||||
|
||||
=head2 $prof->sort(field => "field")
|
||||
|
||||
=head2 $prof->sort(field => "field", reverse => 1)
|
||||
|
||||
Sorts data by the given field. Available fields are:
|
||||
|
||||
longest
|
||||
total
|
||||
count
|
||||
shortest
|
||||
|
||||
The default sort is greatest to smallest, which is the opposite of the
|
||||
normal Perl meaning. This, however, matches the expected behavior of
|
||||
the dbiprof frontend.
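
For example (illustrative calls):

  $prof->sort(field => "longest");              # slowest queries first
  $prof->sort(field => "count", reverse => 1);  # least-called first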
|
||||
|
||||
=cut
|
||||
|
||||
|
||||
# sorts data by one of the available fields
|
||||
{
|
||||
my %FIELDS = (
|
||||
longest => LONGEST,
|
||||
total => TOTAL,
|
||||
count => COUNT,
|
||||
shortest => SHORTEST,
|
||||
key1 => PATH+0,
|
||||
key2 => PATH+1,
|
||||
key3 => PATH+2,
|
||||
);
|
||||
sub sort {
|
||||
my $self = shift;
|
||||
my $nodes = $self->{_nodes};
|
||||
my %opt = @_;
|
||||
|
||||
croak("Missing required field option.") unless $opt{field};
|
||||
|
||||
my $index = $FIELDS{$opt{field}};
|
||||
|
||||
croak("Unrecognized sort field '$opt{field}'.")
|
||||
unless defined $index;
|
||||
|
||||
# sort over index
|
||||
if ($opt{reverse}) {
|
||||
@$nodes = sort {
|
||||
$a->[$index] <=> $b->[$index]
|
||||
} @$nodes;
|
||||
} else {
|
||||
@$nodes = sort {
|
||||
$b->[$index] <=> $a->[$index]
|
||||
} @$nodes;
|
||||
}
|
||||
|
||||
# remember how we're sorted
|
||||
$self->{_sort} = $opt{field};
|
||||
|
||||
return $self;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
=head2 $count = $prof->exclude(key2 => "disconnect")
|
||||
|
||||
=head2 $count = $prof->exclude(key2 => "disconnect", case_sensitive => 1)
|
||||
|
||||
=head2 $count = $prof->exclude(key1 => qr/^SELECT/i)
|
||||
|
||||
Removes records from the data set that match the given string or
|
||||
regular expression. This method modifies the data in a permanent
|
||||
fashion - use clone() first to maintain the original data after
|
||||
exclude(). Returns the number of nodes left in the profile data set.
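
For example, a short sketch that keeps the original data intact and
filters a copy:

  my $work = $prof->clone();
  my $remaining = $work->exclude(key2 => 'disconnect');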
|
||||
|
||||
=cut
|
||||
|
||||
sub exclude {
|
||||
my $self = shift;
|
||||
my $nodes = $self->{_nodes};
|
||||
my %opt = @_;
|
||||
|
||||
# find key index number
|
||||
my ($index, $val);
|
||||
foreach (keys %opt) {
|
||||
if (/^key(\d+)$/) {
|
||||
$index = PATH + $1 - 1;
|
||||
$val = $opt{$_};
|
||||
last;
|
||||
}
|
||||
}
|
||||
croak("Missing required keyN option.") unless $index;
|
||||
|
||||
if (UNIVERSAL::isa($val,"Regexp")) {
|
||||
# regex match
|
||||
@$nodes = grep {
|
||||
$#$_ < $index or $_->[$index] !~ /$val/
|
||||
} @$nodes;
|
||||
} else {
|
||||
if ($opt{case_sensitive}) {
|
||||
@$nodes = grep {
|
||||
$#$_ < $index or $_->[$index] ne $val;
|
||||
} @$nodes;
|
||||
} else {
|
||||
$val = lc $val;
|
||||
@$nodes = grep {
|
||||
$#$_ < $index or lc($_->[$index]) ne $val;
|
||||
} @$nodes;
|
||||
}
|
||||
}
|
||||
|
||||
return scalar @$nodes;
|
||||
}
|
||||
|
||||
|
||||
=head2 $count = $prof->match(key2 => "disconnect")
|
||||
|
||||
=head2 $count = $prof->match(key2 => "disconnect", case_sensitive => 1)
|
||||
|
||||
=head2 $count = $prof->match(key1 => qr/^SELECT/i)
|
||||
|
||||
Removes records from the data set that do not match the given string
|
||||
or regular expression. This method modifies the data in a permanent
|
||||
fashion - use clone() first to maintain the original data after
|
||||
match(). Returns the number of nodes left in the profile data set.
|
||||
|
||||
=cut
|
||||
|
||||
sub match {
|
||||
my $self = shift;
|
||||
my $nodes = $self->{_nodes};
|
||||
my %opt = @_;
|
||||
|
||||
# find key index number
|
||||
my ($index, $val);
|
||||
foreach (keys %opt) {
|
||||
if (/^key(\d+)$/) {
|
||||
$index = PATH + $1 - 1;
|
||||
$val = $opt{$_};
|
||||
last;
|
||||
}
|
||||
}
|
||||
croak("Missing required keyN option.") unless $index;
|
||||
|
||||
if (UNIVERSAL::isa($val,"Regexp")) {
|
||||
# regex match
|
||||
@$nodes = grep {
|
||||
$#$_ >= $index and $_->[$index] =~ /$val/
|
||||
} @$nodes;
|
||||
} else {
|
||||
if ($opt{case_sensitive}) {
|
||||
@$nodes = grep {
|
||||
$#$_ >= $index and $_->[$index] eq $val;
|
||||
} @$nodes;
|
||||
} else {
|
||||
$val = lc $val;
|
||||
@$nodes = grep {
|
||||
$#$_ >= $index and lc($_->[$index]) eq $val;
|
||||
} @$nodes;
|
||||
}
|
||||
}
|
||||
|
||||
return scalar @$nodes;
|
||||
}
|
||||
|
||||
|
||||
=head2 $Data = $prof->Data()
|
||||
|
||||
Returns the same Data hash structure as seen in L<DBI::Profile>. This
|
||||
structure is not sorted. The nodes() structure probably makes more
|
||||
sense for most analysis.
|
||||
|
||||
=cut
|
||||
|
||||
sub Data {
|
||||
my $self = shift;
|
||||
my (%Data, @data, $ptr);
|
||||
|
||||
foreach my $node (@{$self->{_nodes}}) {
|
||||
# traverse to key location
|
||||
$ptr = \%Data;
|
||||
foreach my $key (@{$node}[PATH .. $#$node - 1]) {
|
||||
$ptr->{$key} = {} unless exists $ptr->{$key};
|
||||
$ptr = $ptr->{$key};
|
||||
}
|
||||
|
||||
# slice out node data
|
||||
$ptr->{$node->[-1]} = [ @{$node}[0 .. 6] ];
|
||||
}
|
||||
|
||||
return \%Data;
|
||||
}
|
||||
|
||||
|
||||
=head2 $text = $prof->format($nodes->[0])
|
||||
|
||||
Formats a single node into a human-readable block of text.
|
||||
|
||||
=cut
|
||||
|
||||
sub format {
|
||||
my ($self, $node) = @_;
|
||||
my $format;
|
||||
|
||||
# setup keys
|
||||
my $keys = "";
|
||||
for (my $i = PATH; $i <= $#$node; $i++) {
|
||||
my $key = $node->[$i];
|
||||
|
||||
# remove leading and trailing space
|
||||
$key =~ s/^\s+//;
|
||||
$key =~ s/\s+$//;
|
||||
|
||||
# if key has newlines or is long take special precautions
|
||||
if (length($key) > 72 or $key =~ /\n/) {
|
||||
$keys .= " Key " . ($i - PATH + 1) . " :\n\n$key\n\n";
|
||||
} else {
|
||||
$keys .= " Key " . ($i - PATH + 1) . " : $key\n";
|
||||
}
|
||||
}
|
||||
|
||||
# nodes with multiple runs get the long entry format, nodes with
|
||||
# just one run get a single count.
|
||||
if ($node->[COUNT] > 1) {
|
||||
$format = <<END;
|
||||
Count : %d
|
||||
Total Time : %3.6f seconds
|
||||
Longest Time : %3.6f seconds
|
||||
Shortest Time : %3.6f seconds
|
||||
Average Time : %3.6f seconds
|
||||
END
|
||||
return sprintf($format, @{$node}[COUNT,TOTAL,LONGEST,SHORTEST],
|
||||
$node->[TOTAL] / $node->[COUNT]) . $keys;
|
||||
} else {
|
||||
$format = <<END;
|
||||
Count : %d
|
||||
Time : %3.6f seconds
|
||||
END
|
||||
|
||||
return sprintf($format, @{$node}[COUNT,TOTAL]) . $keys;
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
=head2 $text = $prof->report(number => 10)
|
||||
|
||||
Produces a report with the given number of items.
|
||||
|
||||
=cut
|
||||
|
||||
sub report {
|
||||
my $self = shift;
|
||||
my $nodes = $self->{_nodes};
|
||||
my %opt = @_;
|
||||
|
||||
croak("Missing required number option") unless exists $opt{number};
|
||||
|
||||
$opt{number} = @$nodes if @$nodes < $opt{number};
|
||||
|
||||
my $report = $self->_report_header($opt{number});
|
||||
for (0 .. $opt{number} - 1) {
|
||||
$report .= sprintf("#" x 5 . "[ %d ]". "#" x 59 . "\n",
|
||||
$_ + 1);
|
||||
$report .= $self->format($nodes->[$_]);
|
||||
$report .= "\n";
|
||||
}
|
||||
return $report;
|
||||
}
|
||||
|
||||
# format the header for report()
|
||||
sub _report_header {
|
||||
my ($self, $number) = @_;
|
||||
my $nodes = $self->{_nodes};
|
||||
my $node_count = @$nodes;
|
||||
|
||||
# find total runtime and method count
|
||||
my ($time, $count) = (0,0);
|
||||
foreach my $node (@$nodes) {
|
||||
$time += $node->[TOTAL];
|
||||
$count += $node->[COUNT];
|
||||
}
|
||||
|
||||
my $header = <<END;
|
||||
|
||||
DBI Profile Data ($self->{_profiler})
|
||||
|
||||
END
|
||||
|
||||
# output header fields
|
||||
while (my ($key, $value) = each %{$self->{_header}}) {
|
||||
$header .= sprintf(" %-13s : %s\n", $key, $value);
|
||||
}
|
||||
|
||||
# output summary data fields
|
||||
$header .= sprintf(<<END, $node_count, $number, $self->{_sort}, $count, $time);
|
||||
Total Records : %d (showing %d, sorted by %s)
|
||||
Total Count : %d
|
||||
Total Runtime : %3.6f seconds
|
||||
|
||||
END
|
||||
|
||||
return $header;
|
||||
}
|
||||
|
||||
|
||||
1;
|
||||
|
||||
__END__
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Sam Tregar <sam@tregar.com>
|
||||
|
||||
=head1 COPYRIGHT AND LICENSE
|
||||
|
||||
Copyright (C) 2002 Sam Tregar
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the same terms as Perl 5 itself.
|
||||
|
||||
=cut
|
|
@ -0,0 +1,351 @@
|
|||
package DBI::ProfileDumper;
|
||||
use strict;
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBI::ProfileDumper - profile DBI usage and output data to a file
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
To profile an existing program using DBI::ProfileDumper, set the
|
||||
DBI_PROFILE environment variable and run your program as usual. For
|
||||
example, using bash:
|
||||
|
||||
DBI_PROFILE=2/DBI::ProfileDumper program.pl
|
||||
|
||||
Then analyze the generated file (F<dbi.prof>) with L<dbiprof|dbiprof>:
|
||||
|
||||
dbiprof
|
||||
|
||||
You can also activate DBI::ProfileDumper from within your code:
|
||||
|
||||
use DBI;
|
||||
|
||||
# profile with default path (2) and output file (dbi.prof)
|
||||
$dbh->{Profile} = "!Statement/DBI::ProfileDumper";
|
||||
|
||||
# same thing, spelled out
|
||||
$dbh->{Profile} = "!Statement/DBI::ProfileDumper/File:dbi.prof";
|
||||
|
||||
# another way to say it
|
||||
use DBI::ProfileDumper;
|
||||
$dbh->{Profile} = DBI::ProfileDumper->new(
|
||||
Path => [ '!Statement' ],
|
||||
File => 'dbi.prof' );
|
||||
|
||||
# using a custom path
|
||||
$dbh->{Profile} = DBI::ProfileDumper->new(
|
||||
Path => [ "foo", "bar" ],
|
||||
File => 'dbi.prof',
|
||||
);
|
||||
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
DBI::ProfileDumper is a subclass of L<DBI::Profile|DBI::Profile> which
|
||||
dumps profile data to disk instead of printing a summary to your
|
||||
screen. You can then use L<dbiprof|dbiprof> to analyze the data in
|
||||
a number of interesting ways, or you can roll your own analysis using
|
||||
L<DBI::ProfileData|DBI::ProfileData>.
|
||||
|
||||
B<NOTE:> For Apache/mod_perl applications, use
|
||||
L<DBI::ProfileDumper::Apache|DBI::ProfileDumper::Apache>.
|
||||
|
||||
=head1 USAGE
|
||||
|
||||
One way to use this module is just to enable it in your C<$dbh>:
|
||||
|
||||
$dbh->{Profile} = "1/DBI::ProfileDumper";
|
||||
|
||||
This will write out profile data by statement into a file called
|
||||
F<dbi.prof>. If you want to modify either of these properties, you
|
||||
can construct the DBI::ProfileDumper object yourself:
|
||||
|
||||
use DBI::ProfileDumper;
|
||||
$dbh->{Profile} = DBI::ProfileDumper->new(
|
||||
Path => [ '!Statement' ],
|
||||
File => 'dbi.prof'
|
||||
);
|
||||
|
||||
The C<Path> option takes the same values as in
|
||||
L<DBI::Profile>. The C<File> option gives the name of the
|
||||
file where results will be collected. If it already exists it will be
|
||||
overwritten.
|
||||
|
||||
You can also activate this module by setting the DBI_PROFILE
|
||||
environment variable:
|
||||
|
||||
$ENV{DBI_PROFILE} = "!Statement/DBI::ProfileDumper";
|
||||
|
||||
This will cause all DBI handles to share the same profiling object.
|
||||
|
||||
=head1 METHODS
|
||||
|
||||
The following methods are available to be called using the profile
|
||||
object. You can get access to the profile object from the Profile key
|
||||
in any DBI handle:
|
||||
|
||||
my $profile = $dbh->{Profile};
|
||||
|
||||
=head2 flush_to_disk
|
||||
|
||||
$profile->flush_to_disk()
|
||||
|
||||
Flushes all collected profile data to disk and empties the Data hash. Returns
|
||||
the filename written to. If no profile data has been collected then the file is
|
||||
not written and flush_to_disk() returns undef.
|
||||
|
||||
The file is locked while it's being written. A process 'consuming' the files
|
||||
while they're being written to should rename the file first, then lock it,
|
||||
then read it, then close and delete it. The C<DeleteFiles> option to
|
||||
L<DBI::ProfileData> does the right thing.
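
As an illustration only, a consumer following that rename/lock/read/delete
sequence by hand might look roughly like this (file names are assumptions;
L<DBI::ProfileData> with C<DeleteFiles> already implements this for you):

  use Fcntl qw(:flock);

  my $file = "dbi.prof";
  rename($file, "$file.deleteme") or die "Can't rename $file: $!";
  open my $fh, "<", "$file.deleteme" or die "Can't read $file.deleteme: $!";
  flock($fh, LOCK_SH);        # wait for any in-progress write to finish
  my @profile_lines = <$fh>;  # read the dump
  close($fh);                 # releases the lock
  unlink("$file.deleteme");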
|
||||
|
||||
This method may be called multiple times during a program run.
|
||||
|
||||
=head2 empty
|
||||
|
||||
$profile->empty()
|
||||
|
||||
Clears the Data hash without writing to disk.
|
||||
|
||||
=head2 filename
|
||||
|
||||
$filename = $profile->filename();
|
||||
|
||||
Get or set the filename.
|
||||
|
||||
The filename can be specified as a CODE reference, in which case the referenced
|
||||
code should return the filename to be used. The code will be called with the
|
||||
profile object as its first argument.
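
For example (hypothetical naming scheme):

  # write each process's data to its own file
  my $profile = $dbh->{Profile};
  $profile->filename( sub { "dbi.prof.$$" } );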
|
||||
|
||||
=head1 DATA FORMAT
|
||||
|
||||
The data format written by DBI::ProfileDumper starts with a header
|
||||
containing the version number of the module used to generate it. Then
|
||||
a block of variable declarations describes the profile. After two
|
||||
newlines, the profile data forms the body of the file. For example:
|
||||
|
||||
DBI::ProfileDumper 2.003762
|
||||
Path = [ '!Statement', '!MethodName' ]
|
||||
Program = t/42profile_data.t
|
||||
|
||||
+ 1 SELECT name FROM users WHERE id = ?
|
||||
+ 2 prepare
|
||||
= 1 0.0312958955764771 0.000490069389343262 0.000176072120666504 0.00140702724456787 1023115819.83019 1023115819.86576
|
||||
+ 2 execute
|
||||
= 1 0.0312958955764771 0.000490069389343262 0.000176072120666504 0.00140702724456787 1023115819.83019 1023115819.86576
|
||||
+ 2 fetchrow_hashref
|
||||
= 1 0.0312958955764771 0.000490069389343262 0.000176072120666504 0.00140702724456787 1023115819.83019 1023115819.86576
|
||||
+ 1 UPDATE users SET name = ? WHERE id = ?
|
||||
+ 2 prepare
|
||||
= 1 0.0312958955764771 0.000490069389343262 0.000176072120666504 0.00140702724456787 1023115819.83019 1023115819.86576
|
||||
+ 2 execute
|
||||
= 1 0.0312958955764771 0.000490069389343262 0.000176072120666504 0.00140702724456787 1023115819.83019 1023115819.86576
|
||||
|
||||
The lines beginning with C<+> signs signify keys. The number after
|
||||
the C<+> sign shows the nesting level of the key. Lines beginning
|
||||
with C<=> are the actual profile data, in the same order as
|
||||
in DBI::Profile.
|
||||
|
||||
Note that the same path may be present multiple times in the data file
|
||||
since C<flush_to_disk()> may be called more than once. When read by
|
||||
DBI::ProfileData the data points will be merged to produce a single
|
||||
data set for each distinct path.
|
||||
|
||||
The key strings are transformed in three ways. First, all backslashes
|
||||
are doubled. Then all newlines and carriage-returns are transformed
|
||||
into C<\n> and C<\r> respectively. Finally, any NULL bytes (C<\0>)
|
||||
are entirely removed. When DBI::ProfileData reads the file the first
|
||||
two transformations will be reversed, but NULL bytes will not be
|
||||
restored.
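
For illustration, using the C<escape_key()> function defined in the code
below (the SQL is made up):

  my $escaped = escape_key("SELECT a\nFROM t");
  # $escaped is the single line:  SELECT a\nFROM t
  # (a literal backslash followed by 'n', not a newline)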
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Sam Tregar <sam@tregar.com>
|
||||
|
||||
=head1 COPYRIGHT AND LICENSE
|
||||
|
||||
Copyright (C) 2002 Sam Tregar
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the same terms as Perl 5 itself.
|
||||
|
||||
=cut
|
||||
|
||||
# inherit from DBI::Profile
|
||||
use DBI::Profile;
|
||||
|
||||
our @ISA = ("DBI::Profile");
|
||||
|
||||
our $VERSION = "2.015325";
|
||||
|
||||
use Carp qw(croak);
|
||||
use Fcntl qw(:flock);
|
||||
use Symbol;
|
||||
|
||||
my $HAS_FLOCK = (defined $ENV{DBI_PROFILE_FLOCK})
|
||||
? $ENV{DBI_PROFILE_FLOCK}
|
||||
: do { local $@; eval { flock STDOUT, 0; 1 } };
|
||||
|
||||
my $program_header;
|
||||
|
||||
|
||||
# validate params and setup default
|
||||
sub new {
|
||||
my $pkg = shift;
|
||||
my $self = $pkg->SUPER::new(
|
||||
LockFile => $HAS_FLOCK,
|
||||
@_,
|
||||
);
|
||||
|
||||
# provide a default filename
|
||||
$self->filename("dbi.prof") unless $self->filename;
|
||||
|
||||
DBI->trace_msg("$self: @{[ %$self ]}\n",0)
|
||||
if $self->{Trace} && $self->{Trace} >= 2;
|
||||
|
||||
return $self;
|
||||
}
|
||||
|
||||
|
||||
# get/set filename to use
|
||||
sub filename {
|
||||
my $self = shift;
|
||||
$self->{File} = shift if @_;
|
||||
my $filename = $self->{File};
|
||||
$filename = $filename->($self) if ref($filename) eq 'CODE';
|
||||
return $filename;
|
||||
}
|
||||
|
||||
|
||||
# flush available data to disk
|
||||
sub flush_to_disk {
|
||||
my $self = shift;
|
||||
my $class = ref $self;
|
||||
my $filename = $self->filename;
|
||||
my $data = $self->{Data};
|
||||
|
||||
if (1) { # make an option
|
||||
if (not $data or ref $data eq 'HASH' && !%$data) {
|
||||
DBI->trace_msg("flush_to_disk skipped for empty profile\n",0) if $self->{Trace};
|
||||
return undef;
|
||||
}
|
||||
}
|
||||
|
||||
my $fh = gensym;
|
||||
if (($self->{_wrote_header}||'') eq $filename) {
|
||||
# append more data to the file
|
||||
# XXX assumes that Path hasn't changed
|
||||
open($fh, ">>", $filename)
|
||||
or croak("Unable to open '$filename' for $class output: $!");
|
||||
} else {
|
||||
# create new file (or overwrite existing)
|
||||
if (-f $filename) {
|
||||
my $bak = $filename.'.prev';
|
||||
unlink($bak);
|
||||
rename($filename, $bak)
|
||||
or warn "Error renaming $filename to $bak: $!\n";
|
||||
}
|
||||
open($fh, ">", $filename)
|
||||
or croak("Unable to open '$filename' for $class output: $!");
|
||||
}
|
||||
# lock the file (before checking size and writing the header)
|
||||
flock($fh, LOCK_EX) if $self->{LockFile};
|
||||
# write header if file is empty - typically because we just opened it
|
||||
# in '>' mode, or perhaps we used '>>' but the file had been truncated externally.
|
||||
if (-s $fh == 0) {
|
||||
DBI->trace_msg("flush_to_disk wrote header to $filename\n",0) if $self->{Trace};
|
||||
$self->write_header($fh);
|
||||
$self->{_wrote_header} = $filename;
|
||||
}
|
||||
|
||||
my $lines = $self->write_data($fh, $self->{Data}, 1);
|
||||
DBI->trace_msg("flush_to_disk wrote $lines lines to $filename\n",0) if $self->{Trace};
|
||||
|
||||
close($fh) # unlocks the file
|
||||
or croak("Error closing '$filename': $!");
|
||||
|
||||
$self->empty();
|
||||
|
||||
|
||||
return $filename;
|
||||
}
|
||||
|
||||
|
||||
# write header to a filehandle
|
||||
sub write_header {
|
||||
my ($self, $fh) = @_;
|
||||
|
||||
# isolate us against globals which affect print
|
||||
local($\, $,);
|
||||
|
||||
# $self->VERSION can return undef during global destruction
|
||||
my $version = $self->VERSION || $VERSION;
|
||||
|
||||
# module name and version number
|
||||
print $fh ref($self)." $version\n";
|
||||
|
||||
# print out Path (may contain CODE refs etc)
|
||||
my @path_words = map { escape_key($_) } @{ $self->{Path} || [] };
|
||||
print $fh "Path = [ ", join(', ', @path_words), " ]\n";
|
||||
|
||||
# print out $0 and @ARGV
|
||||
if (!$program_header) {
|
||||
# XXX should really quote as well as escape
|
||||
$program_header = "Program = "
|
||||
. join(" ", map { escape_key($_) } $0, @ARGV)
|
||||
. "\n";
|
||||
}
|
||||
print $fh $program_header;
|
||||
|
||||
# all done
|
||||
print $fh "\n";
|
||||
}
|
||||
|
||||
|
||||
# write data in the prescribed format
|
||||
sub write_data {
|
||||
my ($self, $fh, $data, $level) = @_;
|
||||
|
||||
# XXX it's valid for $data to be an ARRAY ref, i.e., Path is empty.
|
||||
# produce an empty profile for invalid $data
|
||||
return 0 unless $data and UNIVERSAL::isa($data,'HASH');
|
||||
|
||||
# isolate us against globals which affect print
|
||||
local ($\, $,);
|
||||
|
||||
my $lines = 0;
|
||||
while (my ($key, $value) = each(%$data)) {
|
||||
# output a key
|
||||
print $fh "+ $level ". escape_key($key). "\n";
|
||||
if (UNIVERSAL::isa($value,'ARRAY')) {
|
||||
# output a data set for a leaf node
|
||||
print $fh "= ".join(' ', @$value)."\n";
|
||||
$lines += 1;
|
||||
} else {
|
||||
# recurse through keys - this could be rewritten to use a
|
||||
# stack for some small performance gain
|
||||
$lines += $self->write_data($fh, $value, $level + 1);
|
||||
}
|
||||
}
|
||||
return $lines;
|
||||
}
|
||||
|
||||
|
||||
# escape a key for output
|
||||
sub escape_key {
|
||||
my $key = shift;
|
||||
$key =~ s!\\!\\\\!g;
|
||||
$key =~ s!\n!\\n!g;
|
||||
$key =~ s!\r!\\r!g;
|
||||
$key =~ s!\0!!g;
|
||||
return $key;
|
||||
}
|
||||
|
||||
|
||||
# flush data to disk when profile object goes out of scope
|
||||
sub on_destroy {
|
||||
shift->flush_to_disk();
|
||||
}
|
||||
|
||||
1;
|
|
@ -0,0 +1,219 @@
|
|||
package DBI::ProfileDumper::Apache;
|
||||
|
||||
use strict;
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBI::ProfileDumper::Apache - capture DBI profiling data from Apache/mod_perl
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
Add this line to your F<httpd.conf>:
|
||||
|
||||
PerlSetEnv DBI_PROFILE 2/DBI::ProfileDumper::Apache
|
||||
|
||||
(If you're using mod_perl2, see L</When using mod_perl2> for some additional notes.)
|
||||
|
||||
Then restart your server. Access the code you wish to test using a
|
||||
web browser, then shut down your server. This will create a set of
|
||||
F<dbi.prof.*> files in your Apache log directory.
|
||||
|
||||
Get a profiling report with L<dbiprof|dbiprof>:
|
||||
|
||||
dbiprof /path/to/your/apache/logs/dbi.prof.*
|
||||
|
||||
When you're ready to perform another profiling run, delete the old files and start again.
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
This module interfaces DBI::ProfileDumper to Apache/mod_perl. Using
|
||||
this module you can collect profiling data from mod_perl applications.
|
||||
It works by creating a DBI::ProfileDumper data file for each Apache
|
||||
process. These files are created in your Apache log directory. You
|
||||
can then use the dbiprof utility to analyze the profile files.
|
||||
|
||||
=head1 USAGE
|
||||
|
||||
=head2 LOADING THE MODULE
|
||||
|
||||
The easiest way to use this module is just to set the DBI_PROFILE
|
||||
environment variable in your F<httpd.conf>:
|
||||
|
||||
PerlSetEnv DBI_PROFILE 2/DBI::ProfileDumper::Apache
|
||||
|
||||
The DBI will look after loading and using the module when the first DBI handle
|
||||
is created.
|
||||
|
||||
It's also possible to use this module by setting the Profile attribute
|
||||
of any DBI handle:
|
||||
|
||||
$dbh->{Profile} = "2/DBI::ProfileDumper::Apache";
|
||||
|
||||
See L<DBI::ProfileDumper> for more possibilities, and L<DBI::Profile> for full
|
||||
details of the DBI's profiling mechanism.
|
||||
|
||||
=head2 WRITING PROFILE DATA
|
||||
|
||||
The profile data files will be written to your Apache log directory by default.
|
||||
|
||||
The user that the httpd processes run as will need write access to the
|
||||
directory. So, for example, if you're running the child httpds as user 'nobody'
|
||||
and using cronolog to write to the logs directory, then you'll need to change
|
||||
the default.
|
||||
|
||||
You can change the destination directory either by specifying a C<Dir> value
|
||||
when creating the profile (like C<File> in the L<DBI::ProfileDumper> docs),
|
||||
or you can use the C<DBI_PROFILE_APACHE_LOG_DIR> env var to change that. For example:
|
||||
|
||||
PerlSetEnv DBI_PROFILE_APACHE_LOG_DIR /server_root/logs
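
Alternatively (a sketch, assuming you construct the profile object yourself;
the directory path is illustrative), the C<Dir> attribute can be passed
directly:

  use DBI::ProfileDumper::Apache;
  $dbh->{Profile} = DBI::ProfileDumper::Apache->new(
      Path => [ '!Statement' ],
      Dir  => '/var/log/httpd',
  );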
|
||||
|
||||
=head3 When using mod_perl2
|
||||
|
||||
Under mod_perl2 you'll need to either set the C<DBI_PROFILE_APACHE_LOG_DIR> env var,
|
||||
or enable the mod_perl2 C<GlobalRequest> option, like this:
|
||||
|
||||
PerlOptions +GlobalRequest
|
||||
|
||||
to the global config section you're about to test with DBI::ProfileDumper::Apache.
|
||||
If you don't do one of those then you'll see messages in your error_log similar to:
|
||||
|
||||
DBI::ProfileDumper::Apache on_destroy failed: Global $r object is not available. Set:
|
||||
PerlOptions +GlobalRequest in httpd.conf at ..../DBI/ProfileDumper/Apache.pm line 144
|
||||
|
||||
=head3 Naming the files
|
||||
|
||||
The default file name is inherited from L<DBI::ProfileDumper> via the
|
||||
filename() method, but DBI::ProfileDumper::Apache appends the parent pid and
|
||||
the current pid, separated by dots, to that name.
|
||||
|
||||
=head3 Silencing the log
|
||||
|
||||
By default a message is written to STDERR (i.e., the apache error_log file)
|
||||
when flush_to_disk() is called (either explicitly, or implicitly via DESTROY).
|
||||
|
||||
That's usually very useful. If you don't want the log message you can silence
|
||||
it by setting the C<Quiet> attribute true.
|
||||
|
||||
PerlSetEnv DBI_PROFILE 2/DBI::ProfileDumper::Apache/Quiet:1
|
||||
|
||||
$dbh->{Profile} = "!Statement/DBI::ProfileDumper/Quiet:1";
|
||||
|
||||
$dbh->{Profile} = DBI::ProfileDumper->new(
|
||||
Path => [ '!Statement' ],
|
||||
Quiet => 1
|
||||
);
|
||||
|
||||
|
||||
=head2 GATHERING PROFILE DATA
|
||||
|
||||
Once you have the module loaded, use your application as you normally
|
||||
would. Stop the webserver when your tests are complete. Profile data
|
||||
files will be produced when Apache exits and you'll see something like
|
||||
this in your error_log:
|
||||
|
||||
DBI::ProfileDumper::Apache writing to /usr/local/apache/logs/dbi.prof.2604.2619
|
||||
|
||||
Now you can use dbiprof to examine the data:
|
||||
|
||||
dbiprof /usr/local/apache/logs/dbi.prof.2604.*
|
||||
|
||||
By passing dbiprof a list of all generated files, dbiprof will
|
||||
automatically merge them into one result set. You can also pass
|
||||
dbiprof sorting and querying options, see L<dbiprof> for details.
|
||||
|
||||
=head2 CLEANING UP
|
||||
|
||||
Once you've made some code changes, you're ready to start again.
|
||||
First, delete the old profile data files:
|
||||
|
||||
rm /usr/local/apache/logs/dbi.prof.*
|
||||
|
||||
Then restart your server and get back to work.
|
||||
|
||||
=head1 OTHER ISSUES
|
||||
|
||||
=head2 Memory usage
|
||||
|
||||
DBI::Profile can use a lot of memory for very active applications because it
|
||||
collects profiling data in memory for each distinct query run.
|
||||
Calling C<flush_to_disk()> will write the current data to disk and free the
|
||||
memory it's using. For example:
|
||||
|
||||
$dbh->{Profile}->flush_to_disk() if $dbh->{Profile};
|
||||
|
||||
or, rather than flush every time, you could flush less often:
|
||||
|
||||
$dbh->{Profile}->flush_to_disk()
|
||||
if $dbh->{Profile} and !(++$i % 100);
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Sam Tregar <sam@tregar.com>
|
||||
|
||||
=head1 COPYRIGHT AND LICENSE
|
||||
|
||||
Copyright (C) 2002 Sam Tregar
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the same terms as Perl 5 itself.
|
||||
|
||||
=cut
|
||||
|
||||
our $VERSION = "2.014121";
|
||||
|
||||
our @ISA = qw(DBI::ProfileDumper);
|
||||
|
||||
use DBI::ProfileDumper;
|
||||
use File::Spec;
|
||||
|
||||
my $initial_pid = $$;
|
||||
|
||||
use constant MP2 => ($ENV{MOD_PERL_API_VERSION} and $ENV{MOD_PERL_API_VERSION} == 2) ? 1 : 0;
|
||||
|
||||
my $server_root_dir;
|
||||
|
||||
if (MP2) {
|
||||
require Apache2::ServerUtil;
|
||||
$server_root_dir = Apache2::ServerUtil::server_root();
|
||||
}
|
||||
else {
|
||||
require Apache;
|
||||
$server_root_dir = eval { Apache->server_root_relative('') } || "/tmp";
|
||||
}
|
||||
|
||||
|
||||
sub _dirname {
|
||||
my $self = shift;
|
||||
return $self->{Dir} ||= $ENV{DBI_PROFILE_APACHE_LOG_DIR}
|
||||
|| File::Spec->catdir($server_root_dir, "logs");
|
||||
}
|
||||
|
||||
|
||||
sub filename {
|
||||
my $self = shift;
|
||||
my $filename = $self->SUPER::filename(@_);
|
||||
return $filename if not $filename; # not set yet
|
||||
|
||||
# to be able to identify groups of profile files from the same set of
|
||||
# apache processes, we include the parent pid in the file name
|
||||
# as well as the pid.
|
||||
my $group_pid = ($$ eq $initial_pid) ? $$ : getppid();
|
||||
$filename .= ".$group_pid.$$";
|
||||
|
||||
return $filename if File::Spec->file_name_is_absolute($filename);
|
||||
return File::Spec->catfile($self->_dirname, $filename);
|
||||
}
|
||||
|
||||
|
||||
sub flush_to_disk {
|
||||
my $self = shift;
|
||||
|
||||
my $filename = $self->SUPER::flush_to_disk(@_);
|
||||
|
||||
print STDERR ref($self)." pid$$ written to $filename\n"
|
||||
if $filename && not $self->{Quiet};
|
||||
|
||||
return $filename;
|
||||
}
|
||||
|
||||
1;
|
|
@ -0,0 +1,50 @@
|
|||
package DBI::ProfileSubs;
|
||||
|
||||
our $VERSION = "0.009396";
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBI::ProfileSubs - Subroutines for dynamic profile Path
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
DBI_PROFILE='&norm_std_n3' prog.pl
|
||||
|
||||
This is new and still experimental.
|
||||
|
||||
=head1 TO DO
|
||||
|
||||
Define some kind of naming convention for the subs.
|
||||
|
||||
=cut
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
|
||||
# would be good to refactor these regex into separate subs and find some
|
||||
# way to compose them in various combinations into multiple subs.
|
||||
# Perhaps via AUTOLOAD where \&auto_X_Y_Z creates a sub that does X, Y, and Z.
|
||||
# The final subs always need to be very fast.
|
||||
#
|
||||
|
||||
sub norm_std_n3 {
|
||||
# my ($h, $method_name) = @_;
|
||||
local $_ = $_;
|
||||
|
||||
s/\b\d+\b/<N>/g; # 42 -> <N>
|
||||
s/\b0x[0-9A-Fa-f]+\b/<N>/g; # 0xFE -> <N>
|
||||
|
||||
s/'.*?'/'<S>'/g; # single quoted strings (doesn't handle escapes)
|
||||
s/".*?"/"<S>"/g; # double quoted strings (doesn't handle escapes)
|
||||
|
||||
# convert names like log20001231 into log<N>
|
||||
s/([a-z_]+)(\d{3,})\b/${1}<N>/ig;
|
||||
|
||||
# abbreviate massive "in (...)" statements and similar
|
||||
s!((\s*<[NS]>\s*,\s*){100,})!sprintf("$2,<repeated %d times>",length($1)/2)!eg;
|
||||
|
||||
return $_;
|
||||
}
|
||||
|
||||
1;
|
|
@ -0,0 +1,897 @@
|
|||
# $Header: /home/timbo/dbi/lib/DBI/RCS/ProxyServer.pm,v 11.9 2003/05/14 11:08:17 timbo Exp $
|
||||
# -*- perl -*-
|
||||
#
|
||||
# DBI::ProxyServer - a proxy server for DBI drivers
|
||||
#
|
||||
# Copyright (c) 1997 Jochen Wiedmann
|
||||
#
|
||||
# The DBD::Proxy module is free software; you can redistribute it and/or
|
||||
# modify it under the same terms as Perl itself. In particular permission
|
||||
# is granted to Tim Bunce for distributing this as a part of the DBI.
|
||||
#
|
||||
#
|
||||
# Author: Jochen Wiedmann
|
||||
# Am Eisteich 9
|
||||
# 72555 Metzingen
|
||||
# Germany
|
||||
#
|
||||
# Email: joe@ispsoft.de
|
||||
# Phone: +49 7123 14881
|
||||
#
|
||||
#
|
||||
##############################################################################
|
||||
|
||||
|
||||
require 5.004;
|
||||
use strict;
|
||||
|
||||
use RPC::PlServer 0.2001;
|
||||
require DBI;
|
||||
require Config;
|
||||
|
||||
|
||||
package DBI::ProxyServer;
|
||||
|
||||
|
||||
|
||||
############################################################################
|
||||
#
|
||||
# Constants
|
||||
#
|
||||
############################################################################
|
||||
|
||||
use vars qw($VERSION @ISA);
|
||||
|
||||
$VERSION = "0.3005";
|
||||
@ISA = qw(RPC::PlServer DBI);
|
||||
|
||||
|
||||
# Most of the options below are set to default values; we note them here
|
||||
# just for the sake of documentation.
|
||||
my %DEFAULT_SERVER_OPTIONS;
|
||||
{
|
||||
my $o = \%DEFAULT_SERVER_OPTIONS;
|
||||
$o->{'chroot'} = undef, # To be used in the initfile,
|
||||
# after loading the required
|
||||
# DBI drivers.
|
||||
$o->{'clients'} =
|
||||
[ { 'mask' => '.*',
|
||||
'accept' => 1,
|
||||
'cipher' => undef
|
||||
}
|
||||
];
|
||||
$o->{'configfile'} = '/etc/dbiproxy.conf' if -f '/etc/dbiproxy.conf';
|
||||
$o->{'debug'} = 0;
|
||||
$o->{'facility'} = 'daemon';
|
||||
$o->{'group'} = undef;
|
||||
$o->{'localaddr'} = undef; # Bind to any local IP number
|
||||
$o->{'localport'} = undef; # Must set port number on the
|
||||
# command line.
|
||||
$o->{'logfile'} = undef; # Use syslog or EventLog.
|
||||
|
||||
# XXX don't restrict methods that can be called (trust users once connected)
|
||||
$o->{'XXX_methods'} = {
|
||||
'DBI::ProxyServer' => {
|
||||
'Version' => 1,
|
||||
'NewHandle' => 1,
|
||||
'CallMethod' => 1,
|
||||
'DestroyHandle' => 1
|
||||
},
|
||||
'DBI::ProxyServer::db' => {
|
||||
'prepare' => 1,
|
||||
'commit' => 1,
|
||||
'rollback' => 1,
|
||||
'STORE' => 1,
|
||||
'FETCH' => 1,
|
||||
'func' => 1,
|
||||
'quote' => 1,
|
||||
'type_info_all' => 1,
|
||||
'table_info' => 1,
|
||||
'disconnect' => 1,
|
||||
},
|
||||
'DBI::ProxyServer::st' => {
|
||||
'execute' => 1,
|
||||
'STORE' => 1,
|
||||
'FETCH' => 1,
|
||||
'func' => 1,
|
||||
'fetch' => 1,
|
||||
'finish' => 1
|
||||
}
|
||||
};
|
||||
if ($Config::Config{'usethreads'} eq 'define') {
|
||||
$o->{'mode'} = 'threads';
|
||||
} elsif ($Config::Config{'d_fork'} eq 'define') {
|
||||
$o->{'mode'} = 'fork';
|
||||
} else {
|
||||
$o->{'mode'} = 'single';
|
||||
}
|
||||
# No pidfile by default, configuration must provide one if needed
|
||||
$o->{'pidfile'} = 'none';
|
||||
$o->{'user'} = undef;
|
||||
};
|
||||
|
||||
|
||||
############################################################################
|
||||
#
|
||||
# Name: Version
|
||||
#
|
||||
# Purpose: Return version string
|
||||
#
|
||||
# Inputs: $class - This class
|
||||
#
|
||||
# Result: Version string; suitable for printing by "--version"
|
||||
#
|
||||
############################################################################
|
||||
|
||||
sub Version {
|
||||
my $version = $DBI::ProxyServer::VERSION;
|
||||
"DBI::ProxyServer $version, Copyright (C) 1998, Jochen Wiedmann";
|
||||
}
|
||||
|
||||
|
||||
############################################################################
|
||||
#
|
||||
# Name: AcceptApplication
|
||||
#
|
||||
# Purpose: Verify DBI DSN
|
||||
#
|
||||
# Inputs: $self - This instance
|
||||
# $dsn - DBI dsn
|
||||
#
|
||||
# Returns: TRUE for a valid DSN, FALSE otherwise
|
||||
#
|
||||
############################################################################
|
||||
|
||||
sub AcceptApplication {
|
||||
my $self = shift; my $dsn = shift;
|
||||
$dsn =~ /^dbi:\w+:/i;
|
||||
}
|
||||
|
||||
|
||||
############################################################################
|
||||
#
|
||||
# Name: AcceptVersion
|
||||
#
|
||||
# Purpose: Verify requested DBI version
|
||||
#
|
||||
# Inputs: $self - Instance
|
||||
# $version - DBI version being requested
|
||||
#
|
||||
# Returns: TRUE for ok, FALSE otherwise
|
||||
#
|
||||
############################################################################
|
||||
|
||||
sub AcceptVersion {
|
||||
my $self = shift; my $version = shift;
|
||||
require DBI;
|
||||
DBI::ProxyServer->init_rootclass();
|
||||
$DBI::VERSION >= $version;
|
||||
}
|
||||
|
||||
|
||||
############################################################################
|
||||
#
|
||||
# Name: AcceptUser
|
||||
#
|
||||
# Purpose: Verify user and password by connecting to the client and
|
||||
# creating a database connection
|
||||
#
|
||||
# Inputs: $self - Instance
|
||||
# $user - User name
|
||||
# $password - Password
|
||||
#
|
||||
############################################################################
|
||||
|
||||
sub AcceptUser {
|
||||
my $self = shift; my $user = shift; my $password = shift;
|
||||
return 0 if (!$self->SUPER::AcceptUser($user, $password));
|
||||
my $dsn = $self->{'application'};
|
||||
$self->Debug("Connecting to $dsn as $user");
|
||||
local $ENV{DBI_AUTOPROXY} = ''; # :-)
|
||||
$self->{'dbh'} = eval {
|
||||
DBI::ProxyServer->connect($dsn, $user, $password,
|
||||
{ 'PrintError' => 0,
|
||||
'Warn' => 0,
|
||||
'RaiseError' => 1,
|
||||
'HandleError' => sub {
|
||||
my $err = $_[1]->err;
|
||||
my $state = $_[1]->state || '';
|
||||
$_[0] .= " [err=$err,state=$state]";
|
||||
return 0;
|
||||
} })
|
||||
};
|
||||
if ($@) {
|
||||
$self->Error("Error while connecting to $dsn as $user: $@");
|
||||
return 0;
|
||||
}
|
||||
[1, $self->StoreHandle($self->{'dbh'}) ];
|
||||
}
|
||||
|
||||
|
||||
sub CallMethod {
|
||||
my $server = shift;
|
||||
my $dbh = $server->{'dbh'};
|
||||
# We could store the private_server attribute permanently in
|
||||
# $dbh. However, we'd have a reference loop in that case and
|
||||
# I would be concerned about garbage collection. :-(
|
||||
$dbh->{'private_server'} = $server;
|
||||
$server->Debug("CallMethod: => " . do { local $^W; join(",", @_)});
|
||||
my @result = eval { $server->SUPER::CallMethod(@_) };
|
||||
my $msg = $@;
|
||||
undef $dbh->{'private_server'};
|
||||
if ($msg) {
|
||||
$server->Debug("CallMethod died with: $@");
|
||||
die $msg;
|
||||
} else {
|
||||
$server->Debug("CallMethod: <= " . do { local $^W; join(",", @result) });
|
||||
}
|
||||
@result;
|
||||
}
|
||||
|
||||
|
||||
sub main {
|
||||
my $server = DBI::ProxyServer->new(\%DEFAULT_SERVER_OPTIONS, \@_);
|
||||
$server->Bind();
|
||||
}
|
||||
|
||||
|
||||
############################################################################
|
||||
#
|
||||
# The DBI part of the proxyserver is implemented as a DBI subclass.
|
||||
# Thus we can reuse some of the DBI methods and overwrite only
|
||||
# those that need additional handling.
|
||||
#
|
||||
############################################################################
|
||||
|
||||
package DBI::ProxyServer::dr;
|
||||
|
||||
@DBI::ProxyServer::dr::ISA = qw(DBI::dr);
|
||||
|
||||
|
||||
package DBI::ProxyServer::db;
|
||||
|
||||
@DBI::ProxyServer::db::ISA = qw(DBI::db);
|
||||
|
||||
sub prepare {
|
||||
my($dbh, $statement, $attr, $params, $proto_ver) = @_;
|
||||
my $server = $dbh->{'private_server'};
|
||||
if (my $client = $server->{'client'}) {
|
||||
if ($client->{'sql'}) {
|
||||
if ($statement =~ /^\s*(\S+)/) {
|
||||
my $st = $1;
|
||||
if (!($statement = $client->{'sql'}->{$st})) {
|
||||
die "Unknown SQL query: $st";
|
||||
}
|
||||
} else {
|
||||
die "Cannot parse restricted SQL statement: $statement";
|
||||
}
|
||||
}
|
||||
}
|
||||
my $sth = $dbh->SUPER::prepare($statement, $attr);
|
||||
my $handle = $server->StoreHandle($sth);
|
||||
|
||||
if ( $proto_ver and $proto_ver > 1 ) {
|
||||
$sth->{private_proxyserver_described} = 0;
|
||||
return $handle;
|
||||
|
||||
} else {
|
||||
# The difference between the usual prepare and ours is that we implement
|
||||
# a combined prepare/execute. The DBD::Proxy driver doesn't call us for
|
||||
# prepare. Only when an execute happens are we called with method
|
||||
# "prepare". Further execute's are called as "execute".
|
||||
my @result = $sth->execute($params);
|
||||
my ($NAME, $TYPE);
|
||||
my $NUM_OF_FIELDS = $sth->{NUM_OF_FIELDS};
|
||||
if ($NUM_OF_FIELDS) { # is a SELECT
|
||||
$NAME = $sth->{NAME};
|
||||
$TYPE = $sth->{TYPE};
|
||||
}
|
||||
($handle, $NUM_OF_FIELDS, $sth->{'NUM_OF_PARAMS'},
|
||||
$NAME, $TYPE, @result);
|
||||
}
|
||||
}
|
||||
|
||||
sub table_info {
|
||||
my $dbh = shift;
|
||||
my $sth = $dbh->SUPER::table_info();
|
||||
my $numFields = $sth->{'NUM_OF_FIELDS'};
|
||||
my $names = $sth->{'NAME'};
|
||||
my $types = $sth->{'TYPE'};
|
||||
|
||||
# We wouldn't need to send all the rows at this point, instead we could
|
||||
# make use of $rsth->fetch() on the client as usual.
|
||||
# The problem is that some drivers (namely DBD::ExampleP, DBD::mysql and
|
||||
# DBD::mSQL) are returning foreign sth's here, thus an instance of
|
||||
# DBI::st and not DBI::ProxyServer::st. We could fix this by permitting
|
||||
# the client to execute method DBI::st, but I don't like this.
|
||||
my @rows;
|
||||
while (my ($row) = $sth->fetch()) {
|
||||
last unless defined $row;
|
||||
push(@rows, [@$row]);
|
||||
}
|
||||
($numFields, $names, $types, @rows);
|
||||
}
|
||||
|
||||
|
||||
package DBI::ProxyServer::st;
|
||||
|
||||
@DBI::ProxyServer::st::ISA = qw(DBI::st);
|
||||
|
||||
sub execute {
|
||||
my $sth = shift; my $params = shift; my $proto_ver = shift;
|
||||
my @outParams;
|
||||
if ($params) {
|
||||
for (my $i = 0; $i < @$params;) {
|
||||
my $param = $params->[$i++];
|
||||
if (!ref($param)) {
|
||||
$sth->bind_param($i, $param);
|
||||
}
|
||||
else {
|
||||
if (!ref(@$param[0])) {#It's not a reference
|
||||
$sth->bind_param($i, @$param);
|
||||
}
|
||||
else {
|
||||
$sth->bind_param_inout($i, @$param);
|
||||
my $ref = shift @$param;
|
||||
push(@outParams, $ref);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
my $rows = $sth->SUPER::execute();
|
||||
if ( $proto_ver and $proto_ver > 1 and not $sth->{private_proxyserver_described} ) {
|
||||
my ($NAME, $TYPE);
|
||||
my $NUM_OF_FIELDS = $sth->{NUM_OF_FIELDS};
|
||||
if ($NUM_OF_FIELDS) { # is a SELECT
|
||||
$NAME = $sth->{NAME};
|
||||
$TYPE = $sth->{TYPE};
|
||||
}
|
||||
$sth->{private_proxyserver_described} = 1;
|
||||
# First execution, we ship back description.
|
||||
return ($rows, $NUM_OF_FIELDS, $sth->{'NUM_OF_PARAMS'}, $NAME, $TYPE, @outParams);
|
||||
}
|
||||
($rows, @outParams);
|
||||
}
|
||||
|
||||
sub fetch {
|
||||
my $sth = shift; my $numRows = shift || 1;
|
||||
my($ref, @rows);
|
||||
while ($numRows-- && ($ref = $sth->SUPER::fetch())) {
|
||||
push(@rows, [@$ref]);
|
||||
}
|
||||
@rows;
|
||||
}
|
||||
|
||||
|
||||
1;
|
||||
|
||||
|
||||
__END__
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBI::ProxyServer - a server for the DBD::Proxy driver
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
use DBI::ProxyServer;
|
||||
DBI::ProxyServer::main(@ARGV);
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
DBI::ProxyServer is a module for implementing a proxy for the DBI proxy
|
||||
driver, DBD::Proxy. It allows access to databases over the network if the
|
||||
DBMS does not offer networked operations. But the proxy server might be
|
||||
useful for you, even if you have a DBMS with integrated network
|
||||
functionality: It can be used as a DBI proxy in a firewalled environment.
|
||||
|
||||
DBI::ProxyServer runs as a daemon on the machine with the DBMS or on the
|
||||
firewall. The client connects to the agent using the DBI driver DBD::Proxy,
|
||||
thus in exactly the same way as using DBD::mysql, DBD::mSQL or any other
|
||||
DBI driver.
|
||||
|
||||
The agent is implemented as an RPC::PlServer application. Thus you have
|
||||
access to all the possibilities of this module, in particular encryption
|
||||
and a similar configuration file. DBI::ProxyServer adds the possibility of
|
||||
query restrictions: You can define a set of queries that a client may
|
||||
execute and restrict access to those. (Requires a DBI driver that supports
|
||||
parameter binding.) See L</CONFIGURATION FILE>.
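
For illustration, a config-file client entry using such a named-query
restriction might look like this (the mask and statements are made up; the
server's prepare() method above resolves the first word of an incoming
statement against this C<sql> map):

  {
      'clients' => [
          { 'mask'   => '^192\.168\.1\.\d+$',
            'accept' => 1,
            'sql'    => {
                'select_user' => 'SELECT name FROM users WHERE id = ?',
                'insert_user' => 'INSERT INTO users (name) VALUES (?)',
            },
          },
      ],
  }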
|
||||
|
||||
The provided driver script, L<dbiproxy>, may either be used as it is or
|
||||
used as the basis for a local version modified to meet your needs.
|
||||
|
||||
=head1 OPTIONS
|
||||
|
||||
When calling the DBI::ProxyServer::main() function, you supply an
|
||||
array of options. These options are parsed by the Getopt::Long module.
|
||||
The ProxyServer inherits all of RPC::PlServer's and hence Net::Daemon's
|
||||
options and option handling, in particular the ability to read
|
||||
options from either the command line or a config file. See
|
||||
L<RPC::PlServer>. See L<Net::Daemon>. Available options include
|
||||
|
||||
=over 4
|
||||
|
||||
=item I<chroot> (B<--chroot=dir>)
|
||||
|
||||
(UNIX only) After doing a bind(), change root directory to the given
|
||||
directory by doing a chroot(). This is useful for security, but it
|
||||
restricts the environment a lot. For example, you need to load DBI
|
||||
drivers in the config file or you have to create hard links to Unix
|
||||
sockets, if your drivers are using them. For example, with MySQL, a
|
||||
config file might contain the following lines:
|
||||
|
||||
my $rootdir = '/var/dbiproxy';
|
||||
my $unixsockdir = '/tmp';
|
||||
my $unixsockfile = 'mysql.sock';
|
||||
foreach $dir ($rootdir, "$rootdir$unixsockdir") {
|
||||
mkdir $dir, 0755;
|
||||
}
|
||||
link("$unixsockdir/$unixsockfile",
|
||||
"$rootdir$unixsockdir/$unixsockfile");
|
||||
require DBD::mysql;
|
||||
|
||||
{
|
||||
'chroot' => $rootdir,
|
||||
...
|
||||
}
|
||||
|
||||
If you don't know chroot(), think of an FTP server where you can see a
|
||||
certain directory tree only after logging in. See also the --group and
|
||||
--user options.
|
||||
|
||||
=item I<clients>
|
||||
|
||||
An array ref with a list of clients. Clients are hash refs with the attributes
|
||||
I<accept> (0 for denying access and 1 for permitting) and I<mask>, a Perl
|
||||
regular expression for the client's IP number or its host name.
|
||||
|
||||
=item I<configfile> (B<--configfile=file>)
|
||||
|
||||
Config files are assumed to return a single hash ref that overrides the
|
||||
arguments of the new method. However, command line arguments in turn take
|
||||
precedence over the config file. See the L<"CONFIGURATION FILE"> section
|
||||
below for details on the config file.
|
||||
|
||||
=item I<debug> (B<--debug>)
|
||||
|
||||
Turn debugging mode on. Mainly this ensures that logging messages of
|
||||
level "debug" are created.
|
||||
|
||||
=item I<facility> (B<--facility=mode>)
|
||||
|
||||
(UNIX only) Facility to use for L<Sys::Syslog>. The default is
|
||||
B<daemon>.
|
||||
|
||||
=item I<group> (B<--group=gid>)
|
||||
|
||||
After doing a bind(), change the real and effective GID to the given value.
|
||||
This is useful, if you want your server to bind to a privileged port
|
||||
(<1024), but don't want the server to execute as root. See also
|
||||
the --user option.
|
||||
|
||||
GIDs can be passed as group names or numeric values.
|
||||
|
||||
=item I<localaddr> (B<--localaddr=ip>)
|
||||
|
||||
By default a daemon listens on any IP number that a machine
|
||||
has. This attribute allows one to restrict the server to the given
|
||||
IP number.
|
||||
|
||||
=item I<localport> (B<--localport=port>)
|
||||
|
||||
This attribute sets the port on which the daemon is listening. It
|
||||
must be given somehow, as there's no default.
|
||||
|
||||
=item I<logfile> (B<--logfile=file>)
|
||||
|
||||
By default, logging messages will be written to the syslog (Unix) or
|
||||
to the event log (Windows NT). On other operating systems you need to
|
||||
specify a log file. The special value "STDERR" forces logging to
|
||||
stderr. See L<Net::Daemon::Log> for details.
|
||||
|
||||
=item I<mode> (B<--mode=modename>)
|
||||
|
||||
The server can run in three different modes, depending on the environment.
|
||||
|
||||
If you are running Perl 5.005 and compiled it for threads, then the
|
||||
server will create a new thread for each connection. The thread will
|
||||
execute the server's Run() method and then terminate. This mode is the
|
||||
default; you can force it with "--mode=threads".
|
||||
|
||||
If threads are not available, but you have a working fork(), then the
|
||||
server will behave similarly by creating a new process for each connection.
|
||||
This mode will be used automatically in the absence of threads or if
|
||||
you use the "--mode=fork" option.
|
||||
|
||||
Finally there's a single-connection mode: If the server has accepted a
|
||||
connection, it will enter the Run() method. No other connections are
|
||||
accepted until the Run() method returns (when the client disconnects).
|
||||
This operation mode is useful if you have neither threads nor fork(),
|
||||
for example on the Macintosh. For debugging purposes you can force this
|
||||
mode with "--mode=single".
|
||||
|
||||
=item I<pidfile> (B<--pidfile=file>)
|
||||
|
||||
(UNIX only) If this option is present, a PID file will be created at the
|
||||
given location. Default is to not create a pidfile.
|
||||
|
||||
=item I<user> (B<--user=uid>)
|
||||
|
||||
After doing a bind(), change the real and effective UID to the given value.
|
||||
This is useful, if you want your server to bind to a privileged port
|
||||
(<1024), but don't want the server to execute as root. See also
|
||||
the --group and the --chroot options.
|
||||
|
||||
UIDs can be passed as user names or numeric values.
|
||||
|
||||
=item I<version> (B<--version>)
|
||||
|
||||
Suppresses startup of the server; instead the version string will
|
||||
be printed and the program exits immediately.
|
||||
|
||||
=back
|
||||
|
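For example, a tiny wrapper script could pass a few of the options above
straight to main(). This is only a sketch; the port, mode and logfile
values are arbitrary placeholders:

    use DBI::ProxyServer;

    # roughly equivalent to: dbiproxy --localport=12400 --mode=single --logfile=STDERR
    DBI::ProxyServer::main(
        '--localport=12400',
        '--mode=single',
        '--logfile=STDERR',
    );
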
||||
=head1 SHUTDOWN
|
||||
|
||||
DBI::ProxyServer is built on L<RPC::PlServer> which is, in turn, built on L<Net::Daemon>.
|
||||
|
||||
You should refer to L<Net::Daemon> for how to shut down the server, except that
|
||||
you can't because it's not currently documented there (as of v0.43).
|
||||
The bottom line is that there seems to be no support for graceful shutdown.
|
||||
|
||||
=head1 CONFIGURATION FILE
|
||||
|
||||
The configuration file is just that of I<RPC::PlServer> or I<Net::Daemon>
|
||||
with some additional attributes in the client list.
|
||||
|
||||
The config file is a Perl script. At the top of the file you may include
|
||||
arbitrary Perl source, for example load drivers at the start (useful
|
||||
to enhance performance), prepare a chroot environment and so on.
|
||||
|
||||
The important thing is that you finally return a hash ref of option
|
||||
name/value pairs. The possible options are listed above.
|
||||
|
||||
All possibilities of Net::Daemon and RPC::PlServer apply, in particular
|
||||
|
||||
=over 4
|
||||
|
||||
=item Host and/or User dependent access control
|
||||
|
||||
=item Host and/or User dependent encryption
|
||||
|
||||
=item Changing UID and/or GID after binding to the port
|
||||
|
||||
=item Running in a chroot() environment
|
||||
|
||||
=back
|
||||
|
||||
Additionally the server offers you query restrictions. Given the
|
||||
following client list:
|
||||
|
||||
'clients' => [
|
||||
{ 'mask' => '^admin\.company\.com$',
|
||||
'accept' => 1,
|
||||
'users' => [ 'root', 'wwwrun' ],
|
||||
},
|
||||
{
|
||||
'mask' => '^admin\.company\.com$',
|
||||
'accept' => 1,
|
||||
'users' => [ 'root', 'wwwrun' ],
|
||||
'sql' => {
|
||||
'select' => 'SELECT * FROM foo',
|
||||
'insert' => 'INSERT INTO foo VALUES (?, ?, ?)'
|
||||
}
|
||||
}
|
||||
|
||||
then only the users root and wwwrun may connect from admin.company.com,
|
||||
executing arbitrary queries, but only wwwrun may connect from other
|
||||
hosts and is restricted to
|
||||
|
||||
$sth->prepare("select");
|
||||
|
||||
or
|
||||
|
||||
$sth->prepare("insert");
|
||||
|
||||
which in fact are "SELECT * FROM foo" or "INSERT INTO foo VALUES (?, ?, ?)".
|
||||
|
||||
|
||||
=head1 Proxyserver Configuration file (bigger example)
|
||||
|
||||
This section tells you how to restrict a DBI-Proxy: Not every user from
|
||||
every workstation shall be able to execute every query.
|
||||
|
||||
There is a perl program "dbiproxy" which runs on a machine which is able
|
||||
to connect to all the databases we wish to reach. All Perl-DBD-drivers must
|
||||
be installed on this machine. You can also reach databases for which drivers
|
||||
are not available on the machine where you run the program querying the
|
||||
database, e.g. querying an MS Access database from Linux.
|
||||
|
||||
Create a configuration file "proxy_oracle.cfg" on the dbiproxy server:
|
||||
|
||||
{
|
||||
# This shall run in a shell or a DOS-window
|
||||
# facility => 'daemon',
|
||||
pidfile => 'your_dbiproxy.pid',
|
||||
logfile => 1,
|
||||
debug => 0,
|
||||
mode => 'single',
|
||||
localport => '12400',
|
||||
|
||||
# Access control, the first match in this list wins!
|
||||
# So the order is important
|
||||
clients => [
|
||||
# hint to organize:
|
||||
# the most specialized rules for single machines/users are 1st
|
||||
# then the denying rules
|
||||
# then the rules about whole networks
|
||||
|
||||
# rule: internal_webserver
|
||||
# desc: to get statistical information
|
||||
{
|
||||
# only this single IP-address is meant
|
||||
mask => '^10\.95\.81\.243$',
|
||||
# accept (not defer) connections like this
|
||||
accept => 1,
|
||||
# only users from this list
|
||||
# are allowed to log on
|
||||
users => [ 'informationdesk' ],
|
||||
# only this statistical query is allowed
|
||||
# to get results for a web-query
|
||||
sql => {
|
||||
alive => 'select count(*) from dual',
|
||||
statistic_area => 'select count(*) from e01admin.e01e203 where geb_bezei like ?',
|
||||
}
|
||||
},
|
||||
|
||||
# rule: internal_bad_guy_1
|
||||
{
|
||||
mask => '^10\.95\.81\.1$',
|
||||
accept => 0,
|
||||
},
|
||||
|
||||
# rule: employee_workplace
|
||||
# desc: get detailed information
|
||||
{
|
||||
# any IP-address within this subnet is meant here
|
||||
mask => '^10\.95\.81\.(\d+)$',
|
||||
# accept (not defer) connections like this
|
||||
accept => 1,
|
||||
# only users from this list
|
||||
# are allowed to log on
|
||||
users => [ 'informationdesk', 'lippmann' ],
|
||||
# all these queries are allowed:
|
||||
sql => {
|
||||
search_city => 'select ort_nr, plz, ort from e01admin.e01e200 where plz like ?',
|
||||
search_area => 'select gebiettyp, geb_bezei from e01admin.e01e203 where geb_bezei like ? or geb_bezei like ?',
|
||||
}
|
||||
},
|
||||
|
||||
# rule: internal_bad_guy_2
|
||||
# This does NOT work, because rule "employee_workplace" already matches
|
||||
# with its ip-address-mask of the whole network
|
||||
{
|
||||
# don't accept connection from this ip-address
|
||||
mask => '^10\.95\.81\.5$',
|
||||
accept => 0,
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
Start the proxyserver like this:
|
||||
|
||||
rem a correctly set ORACLE_HOME is needed for Oracle
|
||||
set ORACLE_HOME=d:\oracle\ora81
|
||||
dbiproxy --configfile proxy_oracle.cfg
|
||||
|
||||
|
||||
=head2 Testing the connection from a remote machine
|
||||
|
||||
Call a program "dbish" from your commandline. I take the machine from rule "internal_webserver"
|
||||
|
||||
dbish "dbi:Proxy:hostname=oracle.zdf;port=12400;dsn=dbi:Oracle:e01" informationdesk xxx
|
||||
|
||||
There will be a shell-prompt:
|
||||
|
||||
informationdesk@dbi...> alive
|
||||
|
||||
Current statement buffer (enter '/'...):
|
||||
alive
|
||||
|
||||
informationdesk@dbi...> /
|
||||
COUNT(*)
|
||||
'1'
|
||||
[1 rows of 1 fields returned]
|
||||
|
||||
|
||||
=head2 Testing the connection with a perl-script
|
||||
|
||||
Create a perl-script like this:
|
||||
|
||||
# file: oratest.pl
|
||||
# call me like this: perl oratest.pl user password
|
||||
|
||||
use strict;
|
||||
use DBI;
|
||||
|
||||
my $user = shift || die "Usage: $0 user password";
|
||||
my $pass = shift || die "Usage: $0 user password";
|
||||
my $config = {
|
||||
dsn_at_proxy => "dbi:Oracle:e01",
|
||||
proxy => "hostname=oechsle.zdf;port=12400",
|
||||
};
|
||||
my $dsn = sprintf "dbi:Proxy:%s;dsn=%s",
|
||||
$config->{proxy},
|
||||
$config->{dsn_at_proxy};
|
||||
|
||||
my $dbh = DBI->connect( $dsn, $user, $pass )
|
||||
|| die "connect did not work: $DBI::errstr";
|
||||
|
||||
my $sql = "search_city";
|
||||
printf "%s\n%s\n%s\n", "="x40, $sql, "="x40;
|
||||
my $cur = $dbh->prepare($sql);
|
||||
$cur->bind_param(1,'905%');
|
||||
&show_result ($cur);
|
||||
|
||||
my $sql = "search_area";
|
||||
printf "%s\n%s\n%s\n", "="x40, $sql, "="x40;
|
||||
$cur = $dbh->prepare($sql);
|
||||
$cur->bind_param(1,'Pfarr%');
|
||||
$cur->bind_param(2,'Bronnamberg%');
|
||||
&show_result ($cur);
|
||||
|
||||
my $sql = "statistic_area";
|
||||
printf "%s\n%s\n%s\n", "="x40, $sql, "="x40;
|
||||
$cur = $dbh->prepare($sql);
|
||||
$cur->bind_param(1,'Pfarr%');
|
||||
&show_result ($cur);
|
||||
|
||||
$dbh->disconnect;
|
||||
exit;
|
||||
|
||||
|
||||
sub show_result {
|
||||
my $cur = shift;
|
||||
unless ($cur->execute()) {
|
||||
print "Could not execute\n";
|
||||
return;
|
||||
}
|
||||
|
||||
my $rownum = 0;
|
||||
while (my @row = $cur->fetchrow_array()) {
|
||||
printf "Row is: %s\n", join(", ",@row);
|
||||
if ($rownum++ > 5) {
|
||||
print "... and so on\n";
|
||||
last;
|
||||
}
|
||||
}
|
||||
$cur->finish;
|
||||
}
|
||||
|
||||
The result
|
||||
|
||||
C:\>perl oratest.pl informationdesk xxx
|
||||
========================================
|
||||
search_city
|
||||
========================================
|
||||
Row is: 3322, 9050, Chemnitz
|
||||
Row is: 3678, 9051, Chemnitz
|
||||
Row is: 10447, 9051, Chemnitz
|
||||
Row is: 12128, 9051, Chemnitz
|
||||
Row is: 10954, 90513, Zirndorf
|
||||
Row is: 5808, 90513, Zirndorf
|
||||
Row is: 5715, 90513, Zirndorf
|
||||
... and so on
|
||||
========================================
|
||||
search_area
|
||||
========================================
|
||||
Row is: 101, Bronnamberg
|
||||
Row is: 400, Pfarramt Zirndorf
|
||||
Row is: 400, Pfarramt Rosstal
|
||||
Row is: 400, Pfarramt Oberasbach
|
||||
Row is: 401, Pfarramt Zirndorf
|
||||
Row is: 401, Pfarramt Rosstal
|
||||
========================================
|
||||
statistic_area
|
||||
========================================
|
||||
DBD::Proxy::st execute failed: Server returned error: Failed to execute method CallMethod: Unknown SQL query: statistic_area at E:/Perl/site/lib/DBI/ProxyServer.pm line 258.
|
||||
Could not execute
|
||||
|
||||
|
||||
=head2 How the configuration works
|
||||
|
||||
The most important section for controlling access to your dbi-proxy is "clients =>"
|
||||
in the file "proxy_oracle.cfg":
|
||||
|
||||
Controlling which person at which machine is allowed to access
|
||||
|
||||
=over 4
|
||||
|
||||
=item * "mask" is a perl regular expression against the plain ip-address of the machine which wishes to connect _or_ the reverse-lookup from a nameserver.
|
||||
|
||||
=item * "accept" tells the dbiproxy-server whether ip-adresse like in "mask" are allowed to connect or not (0/1)
|
||||
|
||||
=item * "users" is a reference to a list of usernames which must be matched, this is NOT a regular expression.
|
||||
|
||||
=back
|
||||
|
||||
Controlling which SQL-statements are allowed
|
||||
|
||||
You can allow any SQL statement simply by omitting "sql => ...", but it is more important to restrict the connection so that only permitted queries are possible.
|
||||
|
||||
If you include an sql-section in your config-file like this:
|
||||
|
||||
sql => {
|
||||
alive => 'select count(*) from dual',
|
||||
statistic_area => 'select count(*) from e01admin.e01e203 where geb_bezei like ?',
|
||||
}
|
||||
|
||||
The user is allowed to issue two queries against the dbi-proxy. The queries sent are _not_ "select count(*)..."; they are the keywords "alive" and "statistic_area"! These keywords are replaced by the real query on the server. So you can run a query for "alive":
|
||||
|
||||
my $sql = "alive";
|
||||
my $cur = $dbh->prepare($sql);
|
||||
...
|
||||
|
||||
The flexibility is that you can put parameters in the where-part of the query, so the query is not static. Simply replace a value in the where-part with a question mark and bind it as a parameter to the query.
|
||||
|
||||
my $sql = "statistic_area";
|
||||
my $cur = $dbh->prepare($sql);
|
||||
$cur->bind_param(1,'905%');
|
||||
# A second parameter would be called like this:
|
||||
# $cur->bind_param(2,'98%');
|
||||
|
||||
The result is this query:
|
||||
|
||||
select count(*) from e01admin.e01e203
|
||||
where geb_bezei like '905%'
|
||||
|
||||
Don't try to put parameters into the sql-query like this:
|
||||
|
||||
# Does not work like you think.
|
||||
# Only the first word of the query is parsed,
|
||||
# so it's changed to "statistic_area", the rest is omitted.
|
||||
# You _have_ to work with $cur->bind_param.
|
||||
my $sql = "statistic_area 905%";
|
||||
my $cur = $dbh->prepare($sql);
|
||||
...
|
||||
|
||||
|
||||
=head2 Problems
|
||||
|
||||
=over 4
|
||||
|
||||
=item * I don't know how to restrict users to special databases.
|
||||
|
||||
=item * I don't know how to pass query-parameters via dbish.
|
||||
|
||||
=back
|
||||
|
||||
|
||||
=head1 SECURITY WARNING
|
||||
|
||||
L<RPC::PlServer> used underneath is not secure due to serializing and
|
||||
deserializing data with the L<Storable> module. Use the proxy driver only in a
|
||||
trusted environment.
|
||||
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Copyright (c) 1997 Jochen Wiedmann
|
||||
Am Eisteich 9
|
||||
72555 Metzingen
|
||||
Germany
|
||||
|
||||
Email: joe@ispsoft.de
|
||||
Phone: +49 7123 14881
|
||||
|
||||
The DBI::ProxyServer module is free software; you can redistribute it
|
||||
and/or modify it under the same terms as Perl itself. In particular
|
||||
permission is granted to Tim Bunce for distributing this as a part of
|
||||
the DBI.
|
||||
|
||||
|
||||
=head1 SEE ALSO
|
||||
|
||||
L<dbiproxy>, L<DBD::Proxy>, L<DBI>, L<RPC::PlServer>,
|
||||
L<RPC::PlClient>, L<Net::Daemon>, L<Net::Daemon::Log>,
|
||||
L<Sys::Syslog>, L<Win32::EventLog>, L<syslog>
|
File diff suppressed because it is too large
File diff suppressed because it is too large
|
@@ -0,0 +1,117 @@
|
|||
package DBI::Util::CacheMemory;
|
||||
|
||||
# $Id: CacheMemory.pm 10314 2007-11-26 22:25:33Z Tim $
|
||||
#
|
||||
# Copyright (c) 2007, Tim Bunce, Ireland
|
||||
#
|
||||
# You may distribute under the terms of either the GNU General Public
|
||||
# License or the Artistic License, as specified in the Perl README file.
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBI::Util::CacheMemory - a very fast but very minimal subset of Cache::Memory
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
Like Cache::Memory (part of the Cache distribution) but doesn't support any fancy features.
|
||||
|
||||
This module aims to be a very fast, compatible, strict subset for simple cases,
|
||||
such as basic client-side caching for DBD::Gofer.
|
||||
|
||||
Like Cache::Memory, and other caches in the Cache and Cache::Cache
|
||||
distributions, the data will remain in the cache until cleared, it expires,
|
||||
or the process dies. The cache object simply going out of scope will I<not>
|
||||
destroy the data.
|
||||
|
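For example, a minimal usage sketch (the namespace and key below are
arbitrary names, not anything the DBI itself uses):

    use DBI::Util::CacheMemory;

    my $cache = DBI::Util::CacheMemory->new( namespace => 'my_app' );
    $cache->set( answer => 42 );     # no expiry options supported
    print $cache->get('answer');     # 42
    print $cache->count;             # 1
    $cache->clear;                   # empty this namespace
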
||||
=head1 METHODS WITH CHANGES
|
||||
|
||||
=head2 new
|
||||
|
||||
All options except C<namespace> are ignored.
|
||||
|
||||
=head2 set
|
||||
|
||||
Doesn't support expiry.
|
||||
|
||||
=head2 purge
|
||||
|
||||
Same as clear() - deletes everything in the namespace.
|
||||
|
||||
=head1 METHODS WITHOUT CHANGES
|
||||
|
||||
=over
|
||||
|
||||
=item clear
|
||||
|
||||
=item count
|
||||
|
||||
=item exists
|
||||
|
||||
=item remove
|
||||
|
||||
=back
|
||||
|
||||
=head1 UNSUPPORTED METHODS
|
||||
|
||||
If it's not listed above, it's not supported.
|
||||
|
||||
=cut
|
||||
|
||||
our $VERSION = "0.010315";
|
||||
|
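# Package-level store shared by every instance in this process, keyed by namespace.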
||||
my %cache;
|
||||
|
||||
sub new {
|
||||
my ($class, %options ) = @_;
|
||||
my $namespace = $options{namespace} ||= 'Default';
|
||||
#$options{_cache} = \%cache; # can be handy for debugging/dumping
|
||||
my $self = bless \%options => $class;
|
||||
$cache{ $namespace } ||= {}; # init - ensure it exists
|
||||
return $self;
|
||||
}
|
||||
|
||||
sub set {
|
||||
my ($self, $key, $value) = @_;
|
||||
$cache{ $self->{namespace} }->{$key} = $value;
|
||||
}
|
||||
|
||||
sub get {
|
||||
my ($self, $key) = @_;
|
||||
return $cache{ $self->{namespace} }->{$key};
|
||||
}
|
||||
|
||||
sub exists {
|
||||
my ($self, $key) = @_;
|
||||
return exists $cache{ $self->{namespace} }->{$key};
|
||||
}
|
||||
|
||||
sub remove {
|
||||
my ($self, $key) = @_;
|
||||
return delete $cache{ $self->{namespace} }->{$key};
|
||||
}
|
||||
|
||||
sub purge {
|
||||
return shift->clear;
|
||||
}
|
||||
|
||||
sub clear {
|
||||
$cache{ shift->{namespace} } = {};
|
||||
}
|
||||
|
||||
sub count {
|
||||
return scalar keys %{ $cache{ shift->{namespace} } };
|
||||
}
|
||||
|
||||
sub size {
|
||||
my $c = $cache{ shift->{namespace} };
|
||||
my $size = 0;
|
||||
while ( my ($k,$v) = each %$c ) {
|
||||
$size += length($k) + length($v);
|
||||
}
|
||||
return $size;
|
||||
}
|
||||
|
||||
1;
|
|
@@ -0,0 +1,65 @@
|
|||
package DBI::Util::_accessor;
|
||||
use strict;
|
||||
use Carp;
|
||||
our $VERSION = "0.009479";
|
||||
|
||||
# inspired by Class::Accessor::Fast
|
||||
|
||||
sub new {
|
||||
my($proto, $fields) = @_;
|
||||
my($class) = ref $proto || $proto;
|
||||
$fields ||= {};
|
||||
|
||||
my @dubious = grep { !m/^_/ && !$proto->can($_) } keys %$fields;
|
||||
carp "$class doesn't have accessors for fields: @dubious" if @dubious;
|
||||
|
||||
# make a (shallow) copy of $fields.
|
||||
bless {%$fields}, $class;
|
||||
}
|
||||
|
||||
sub mk_accessors {
|
||||
my($self, @fields) = @_;
|
||||
$self->mk_accessors_using('make_accessor', @fields);
|
||||
}
|
||||
|
||||
sub mk_accessors_using {
|
||||
my($self, $maker, @fields) = @_;
|
||||
my $class = ref $self || $self;
|
||||
|
||||
# So we don't have to do lots of lookups inside the loop.
|
||||
$maker = $self->can($maker) unless ref $maker;
|
||||
|
||||
no strict 'refs';
|
||||
foreach my $field (@fields) {
|
||||
my $accessor = $self->$maker($field);
|
||||
*{$class."\:\:$field"} = $accessor
|
||||
unless defined &{$class."\:\:$field"};
|
||||
}
|
||||
#my $hash_ref = \%{$class."\:\:_accessors_hash};
|
||||
#$hash_ref->{$_}++ for @fields;
|
||||
# XXX also copy down _accessors_hash of base class(es)
|
||||
# so one in this class is complete
|
||||
return;
|
||||
}
|
||||
|
||||
sub make_accessor {
|
||||
my($class, $field) = @_;
|
||||
return sub {
|
||||
my $self = shift;
|
||||
return $self->{$field} unless @_;
|
||||
croak "Too many arguments to $field" if @_ > 1;
|
||||
return $self->{$field} = shift;
|
||||
};
|
||||
}
|
||||
|
||||
sub make_accessor_autoviv_hashref {
|
||||
my($class, $field) = @_;
|
||||
return sub {
|
||||
my $self = shift;
|
||||
return $self->{$field} ||= {} unless @_;
|
||||
croak "Too many arguments to $field" if @_ > 1;
|
||||
return $self->{$field} = shift;
|
||||
};
|
||||
}
|
||||
|
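# Typical use by a subclass (illustrative sketch only; the package and
# field names below are made up):
#
#   package My::Thing;
#   our @ISA = ('DBI::Util::_accessor');
#   __PACKAGE__->mk_accessors(qw(name size));
#
#   my $thing = My::Thing->new({ name => 'example' });
#   $thing->size(42);        # set
#   print $thing->name;      # get, prints "example"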
||||
1;
|
|
@@ -0,0 +1,181 @@
|
|||
package
|
||||
DBI; # hide this non-DBI package from simple indexers
|
||||
|
||||
# $Id: W32ODBC.pm 8696 2007-01-24 23:12:38Z Tim $
|
||||
#
|
||||
# Copyright (c) 1997,1999 Tim Bunce
|
||||
# With many thanks to Patrick Hollins for polishing.
|
||||
#
|
||||
# You may distribute under the terms of either the GNU General Public
|
||||
# License or the Artistic License, as specified in the Perl README file.
|
||||
|
||||
=head1 NAME
|
||||
|
||||
DBI::W32ODBC - An experimental DBI emulation layer for Win32::ODBC
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
use DBI::W32ODBC;
|
||||
|
||||
# apart from the line above everything is just the same as with
|
||||
# the real DBI when using a basic driver with few features.
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
This is an experimental pure perl DBI emulation layer for Win32::ODBC.
|
||||
|
||||
If you can improve this code I'd be interested in hearing about it. If
|
||||
you are having trouble using it please respect the fact that it's very
|
||||
experimental. Ideally fix it yourself and send me the details.
|
||||
|
||||
=head2 Some Things Not Yet Implemented
|
||||
|
||||
Most attributes including PrintError & RaiseError.
|
||||
type_info and table_info
|
||||
|
||||
Volunteers welcome!
|
||||
|
||||
=cut
|
||||
|
||||
${'DBI::VERSION'} # hide version from PAUSE indexer
|
||||
= "0.01";
|
||||
|
||||
my $Revision = sprintf("12.%06d", q$Revision: 8696 $ =~ /(\d+)/o);
|
||||
|
||||
|
||||
sub DBI::W32ODBC::import { } # must trick here since we're called DBI/W32ODBC.pm
|
||||
|
||||
|
||||
use Carp;
|
||||
|
||||
use Win32::ODBC;
|
||||
|
||||
@ISA = qw(Win32::ODBC);
|
||||
|
||||
use strict;
|
||||
|
||||
$DBI::dbi_debug = $ENV{PERL_DBI_DEBUG} || 0;
|
||||
carp "Loaded (W32ODBC) DBI.pm ${'DBI::VERSION'} (debug $DBI::dbi_debug)"
|
||||
if $DBI::dbi_debug;
|
||||
|
||||
|
||||
|
||||
sub connect {
|
||||
my ($class, $dbname, $dbuser, $dbpasswd, $module, $attr) = @_;
|
||||
$dbname .= ";UID=$dbuser" if $dbuser;
|
||||
$dbname .= ";PWD=$dbpasswd" if $dbpasswd;
|
||||
my $h = new Win32::ODBC $dbname;
|
||||
warn "Error connecting to $dbname: ".Win32::ODBC::Error()."\n" unless $h;
|
||||
bless $h, $class if $h; # rebless into our class
|
||||
$h;
|
||||
}
|
||||
|
||||
|
||||
sub quote {
|
||||
my ($h, $string) = @_;
|
||||
return "NULL" if !defined $string;
|
||||
$string =~ s/'/''/g; # standard
|
||||
# This hack seems to be required for Access but probably breaks for
|
||||
# other databases when using \r and \n. It would be better if we could
|
||||
# use ODBC options to detect that we're actually using Access.
|
||||
$string =~ s/\r/' & chr\$(13) & '/g;
|
||||
$string =~ s/\n/' & chr\$(10) & '/g;
|
||||
"'$string'";
|
||||
}
|
||||
|
||||
sub do {
|
||||
my($h, $statement, $attribs, @params) = @_;
|
||||
Carp::carp "\$h->do() attribs unused" if $attribs;
|
||||
my $new_h = $h->prepare($statement) or return undef; ##
|
||||
pop @{ $h->{'___sths'} }; ## certain death assured
|
||||
$new_h->execute(@params) or return undef; ##
|
||||
my $rows = $new_h->rows; ##
|
||||
$new_h->finish; ## bang bang
|
||||
($rows == 0) ? "0E0" : $rows;
|
||||
}
|
||||
|
||||
# ---
|
||||
|
||||
sub prepare {
|
||||
my ($h, $sql) = @_;
|
||||
## opens a new connection with every prepare to allow
|
||||
## multiple, concurrent queries
|
||||
my $new_h = new Win32::ODBC $h->{DSN}; ##
|
||||
return undef if not $new_h; ## bail if no connection
|
||||
bless $new_h; ## shouldn't be sub-classed...
|
||||
$new_h->{'__prepare'} = $sql; ##
|
||||
$new_h->{NAME} = []; ##
|
||||
$new_h->{NUM_OF_FIELDS} = -1; ##
|
||||
push @{ $h->{'___sths'} } ,$new_h; ## save sth in parent for mass destruction
|
||||
return $new_h; ##
|
||||
}
|
||||
|
||||
sub execute {
|
||||
my ($h) = @_;
|
||||
my $rc = $h->Sql($h->{'__prepare'});
|
||||
return undef if $rc;
|
||||
my @fields = $h->FieldNames;
|
||||
$h->{NAME} = \@fields;
|
||||
$h->{NUM_OF_FIELDS} = scalar @fields;
|
||||
$h; # return dbh as pseudo sth
|
||||
}
|
||||
|
||||
|
||||
sub fetchrow_hashref { ## provide DBI compatibility
|
||||
my $h = shift;
|
||||
my $NAME = shift || "NAME";
|
||||
my $row = $h->fetchrow_arrayref or return undef;
|
||||
my %hash;
|
||||
@hash{ @{ $h->{$NAME} } } = @$row;
|
||||
return \%hash;
|
||||
}
|
||||
|
||||
sub fetchrow {
|
||||
my $h = shift;
|
||||
return unless $h->FetchRow();
|
||||
my $fields_r = $h->{NAME};
|
||||
return $h->Data(@$fields_r);
|
||||
}
|
||||
sub fetch {
|
||||
my @row = shift->fetchrow;
|
||||
return undef unless @row;
|
||||
return \@row;
|
||||
}
|
||||
*fetchrow_arrayref = \&fetch; ## provide DBI compatibility
|
||||
*fetchrow_array = \&fetchrow; ## provide DBI compatibility
|
||||
|
||||
sub rows {
|
||||
shift->RowCount;
|
||||
}
|
||||
|
||||
sub finish {
|
||||
shift->Close; ## uncommented this line
|
||||
}
|
||||
|
||||
# ---
|
||||
|
||||
sub commit {
|
||||
shift->Transact(ODBC::SQL_COMMIT);
|
||||
}
|
||||
sub rollback {
|
||||
shift->Transact(ODBC::SQL_ROLLBACK);
|
||||
}
|
||||
|
||||
sub disconnect {
|
||||
my ($h) = shift; ## this will kill all the statement handles
|
||||
foreach (@{$h->{'___sths'}}) { ## created for a specific connection
|
||||
$_->Close if $_->{DSN}; ##
|
||||
} ##
|
||||
$h->Close; ##
|
||||
}
|
||||
|
||||
sub err {
|
||||
(shift->Error)[0];
|
||||
}
|
||||
sub errstr {
|
||||
scalar( shift->Error );
|
||||
}
|
||||
|
||||
# ---
|
||||
|
||||
1;
|
|
@@ -0,0 +1,248 @@
|
|||
package # hide this package from CPAN indexer
|
||||
Win32::ODBC;
|
||||
|
||||
use strict;
|
||||
|
||||
use DBI;
|
||||
|
||||
# once we've been loaded we don't want perl to load the real Win32::ODBC
|
||||
$INC{'Win32/ODBC.pm'} = $INC{'Win32/DBIODBC.pm'} || 1;
|
||||
|
||||
#my $db = new Win32::ODBC("DSN=$self->{'DSN'};UID=$self->{'UID'};PWD=$self->{'PWD'};");
|
||||
|
||||
#EMU --- my $db = new Win32::ODBC("DSN=$DSN;UID=$login;PWD=$password;");
|
||||
sub new
|
||||
{
|
||||
shift;
|
||||
my $connect_line= shift;
|
||||
|
||||
# [R] self-hack to allow empty UID and PWD
|
||||
my $temp_connect_line;
|
||||
$connect_line=~/DSN=\w+/;
|
||||
$temp_connect_line="$&;";
|
||||
if ($connect_line=~/UID=\w?/)
|
||||
{$temp_connect_line.="$&;";}
|
||||
else {$temp_connect_line.="UID=;";};
|
||||
if ($connect_line=~/PWD=\w?/)
|
||||
{$temp_connect_line.="$&;";}
|
||||
else {$temp_connect_line.="PWD=;";};
|
||||
$connect_line=$temp_connect_line;
|
||||
# -[R]-
|
||||
|
||||
my $self= {};
|
||||
|
||||
|
||||
$_=$connect_line;
|
||||
/^(DSN=)(.*)(;UID=)(.*)(;PWD=)(.*)(;)$/;
|
||||
|
||||
#---- DBI CONNECTION VARIABLES
|
||||
|
||||
$self->{ODBC_DSN}=$2;
|
||||
$self->{ODBC_UID}=$4;
|
||||
$self->{ODBC_PWD}=$6;
|
||||
|
||||
|
||||
#---- DBI CONNECTION VARIABLES
|
||||
$self->{DBI_DBNAME}=$self->{ODBC_DSN};
|
||||
$self->{DBI_USER}=$self->{ODBC_UID};
|
||||
$self->{DBI_PASSWORD}=$self->{ODBC_PWD};
|
||||
$self->{DBI_DBD}='ODBC';
|
||||
|
||||
#---- DBI CONNECTION
|
||||
$self->{'DBI_DBH'}=DBI->connect($self->{'DBI_DBNAME'},
|
||||
$self->{'DBI_USER'},$self->{'DBI_PASSWORD'},$self->{'DBI_DBD'});
|
||||
|
||||
warn "Error($DBI::err) : $DBI::errstr\n" if ! $self->{'DBI_DBH'};
|
||||
|
||||
|
||||
#---- RETURN
|
||||
|
||||
bless $self;
|
||||
}
|
||||
|
||||
|
||||
#EMU --- $db->Sql('SELECT * FROM DUAL');
|
||||
sub Sql
|
||||
{
|
||||
my $self= shift;
|
||||
my $SQL_statment=shift;
|
||||
|
||||
# print " SQL : $SQL_statment \n";
|
||||
|
||||
$self->{'DBI_SQL_STATMENT'}=$SQL_statment;
|
||||
|
||||
my $dbh=$self->{'DBI_DBH'};
|
||||
|
||||
# print " DBH : $dbh \n";
|
||||
|
||||
my $sth=$dbh->prepare("$SQL_statment");
|
||||
|
||||
# print " STH : $sth \n";
|
||||
|
||||
$self->{'DBI_STH'}=$sth;
|
||||
|
||||
if ($sth)
|
||||
{
|
||||
$sth->execute();
|
||||
}
|
||||
|
||||
#--- GET ERROR MESSAGES
|
||||
$self->{DBI_ERR}=$DBI::err;
|
||||
$self->{DBI_ERRSTR}=$DBI::errstr;
|
||||
|
||||
if ($sth)
|
||||
{
|
||||
#--- GET COLUMNS NAMES
|
||||
$self->{'DBI_NAME'} = $sth->{NAME};
|
||||
}
|
||||
|
||||
# [R] provide compatibility with Win32::ODBC's way of identifying erroneous SQL statements
|
||||
return ($self->{'DBI_ERR'})?1:undef;
|
||||
# -[R]-
|
||||
}
|
||||
|
||||
|
||||
#EMU --- $db->FetchRow())
|
||||
sub FetchRow
|
||||
{
|
||||
my $self= shift;
|
||||
|
||||
my $sth=$self->{'DBI_STH'};
|
||||
if ($sth)
|
||||
{
|
||||
my @row=$sth->fetchrow_array;
|
||||
$self->{'DBI_ROW'}=\@row;
|
||||
|
||||
if (scalar(@row)>0)
|
||||
{
|
||||
#-- the row of result is not nul
|
||||
#-- return something nothing will be return else
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
return undef;
|
||||
}
|
||||
|
||||
# [R] provide compatibility with Win32::ODBC's Data() method.
|
||||
sub Data
|
||||
{
|
||||
my $self=shift;
|
||||
my @array=@{$self->{'DBI_ROW'}};
|
||||
foreach my $element (@array)
|
||||
{
|
||||
# remove padding of spaces by DBI
|
||||
$element=~s/(\s*$)//;
|
||||
};
|
||||
return (wantarray())?@array:join('', @array);
|
||||
};
|
||||
# -[R]-
|
||||
|
||||
#EMU --- %record = $db->DataHash;
|
||||
sub DataHash
|
||||
{
|
||||
my $self= shift;
|
||||
|
||||
my $p_name=$self->{'DBI_NAME'};
|
||||
my $p_row=$self->{'DBI_ROW'};
|
||||
|
||||
my @name=@$p_name;
|
||||
my @row=@$p_row;
|
||||
|
||||
my %DataHash;
|
||||
#print @name; print "\n"; print @row;
|
||||
# [R] new code that seems to work consistent with Win32::ODBC
|
||||
while (@name)
|
||||
{
|
||||
my $name=shift(@name);
|
||||
my $value=shift(@row);
|
||||
|
||||
# remove padding of spaces by DBI
|
||||
$name=~s/(\s*$)//;
|
||||
$value=~s/(\s*$)//;
|
||||
|
||||
$DataHash{$name}=$value;
|
||||
};
|
||||
# -[R]-
|
||||
|
||||
# [R] old code that didn't appear to work
|
||||
# foreach my $name (@name)
|
||||
# {
|
||||
# $name=~s/(^\s*)|(\s*$)//;
|
||||
# my @arr=@$name;
|
||||
# foreach (@arr)
|
||||
# {
|
||||
# print "lot $name name col $_ or ROW= 0 $row[0] 1 $row[1] 2 $row[2] \n ";
|
||||
# $DataHash{$name}=shift(@row);
|
||||
# }
|
||||
# }
|
||||
# -[R]-
|
||||
|
||||
#--- Return Hash
|
||||
return %DataHash;
|
||||
}
|
||||
|
||||
|
||||
#EMU --- $db->Error()
|
||||
sub Error
|
||||
{
|
||||
my $self= shift;
|
||||
|
||||
if ($self->{'DBI_ERR'} ne '')
|
||||
{
|
||||
#--- Return error message
|
||||
$self->{'DBI_ERRSTR'};
|
||||
}
|
||||
|
||||
#-- else good no error message
|
||||
|
||||
}
|
||||
|
||||
# [R] provide compatibility with Win32::ODBC's Close() method.
|
||||
sub Close
|
||||
{
|
||||
my $self=shift;
|
||||
|
||||
my $dbh=$self->{'DBI_DBH'};
|
||||
$dbh->disconnect;
|
||||
}
|
||||
# -[R]-
|
||||
|
||||
1;
|
||||
|
||||
__END__
|
||||
|
||||
# [R] to -[R]- indicate sections edited by me, Roy Lee
|
||||
|
||||
=head1 NAME
|
||||
|
||||
Win32::DBIODBC - Win32::ODBC emulation layer for the DBI
|
||||
|
||||
=head1 SYNOPSIS
|
||||
|
||||
use Win32::DBIODBC; # instead of use Win32::ODBC
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
This is a I<very> basic I<very> alpha quality Win32::ODBC emulation
|
||||
for the DBI. To use it just replace
|
||||
|
||||
use Win32::ODBC;
|
||||
|
||||
in your scripts with
|
||||
|
||||
use Win32::DBIODBC;
|
||||
|
||||
or, while experimenting, you can pre-load this module without changing your
|
||||
scripts by doing
|
||||
|
||||
perl -MWin32::DBIODBC your_script_name
|
||||
|
||||
=head1 TO DO
|
||||
|
||||
Error handling is virtually non-existent.
|
||||
|
||||
=head1 AUTHOR
|
||||
|
||||
Tom Horen <tho@melexis.com>
|
||||
|
||||
=cut
|
|
@@ -0,0 +1,340 @@
|
|||
#!perl -w
|
||||
|
||||
use strict;
|
||||
|
||||
use Test::More tests => 130;
|
||||
use File::Spec;
|
||||
use Config;
|
||||
|
||||
$|=1;
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
## 01basic.t - test of some basic DBI functions
|
||||
## ----------------------------------------------------------------------------
|
||||
# Mostly this script takes care of testing the items exported by the 3
|
||||
# tags below (in this order):
|
||||
# - :sql_types
|
||||
# - :sql_cursor_types
|
||||
# - :util
|
||||
# It also then handles some other class methods and functions of DBI, such
|
||||
# as the following:
|
||||
# - $DBI::dbi_debug & its relation to DBI->trace
|
||||
# - DBI->internal
|
||||
# and then tests on that return value:
|
||||
# - $i->debug
|
||||
# - $i->{DebugDispatch}
|
||||
# - $i->{Warn}
|
||||
# - $i->{Attribution}
|
||||
# - $i->{Version}
|
||||
# - $i->{private_test1}
|
||||
# - $i->{cachedKids}
|
||||
# - $i->{Kids}
|
||||
# - $i->{ActiveKids}
|
||||
# - $i->{Active}
|
||||
# - and finally that it will not autovivify
|
||||
# - DBI->available_drivers
|
||||
# - DBI->installed_versions (only for developers)
|
||||
## ----------------------------------------------------------------------------
|
||||
|
||||
## load DBI and export some symbols
|
||||
BEGIN {
|
||||
diag "--- Perl $] on $Config{archname}";
|
||||
|
||||
use_ok('DBI', qw(
|
||||
:sql_types
|
||||
:sql_cursor_types
|
||||
:utils
|
||||
));
|
||||
}
|
||||
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
## testing the :sql_types exports
|
||||
|
||||
cmp_ok(SQL_GUID , '==', -11, '... testing sql_type');
|
||||
cmp_ok(SQL_WLONGVARCHAR , '==', -10, '... testing sql_type');
|
||||
cmp_ok(SQL_WVARCHAR , '==', -9, '... testing sql_type');
|
||||
cmp_ok(SQL_WCHAR , '==', -8, '... testing sql_type');
|
||||
cmp_ok(SQL_BIT , '==', -7, '... testing sql_type');
|
||||
cmp_ok(SQL_TINYINT , '==', -6, '... testing sql_type');
|
||||
cmp_ok(SQL_BIGINT , '==', -5, '... testing sql_type');
|
||||
cmp_ok(SQL_LONGVARBINARY , '==', -4, '... testing sql_type');
|
||||
cmp_ok(SQL_VARBINARY , '==', -3, '... testing sql_type');
|
||||
cmp_ok(SQL_BINARY , '==', -2, '... testing sql_type');
|
||||
cmp_ok(SQL_LONGVARCHAR , '==', -1, '... testing sql_type');
|
||||
cmp_ok(SQL_UNKNOWN_TYPE , '==', 0, '... testing sql_type');
|
||||
cmp_ok(SQL_ALL_TYPES , '==', 0, '... testing sql_type');
|
||||
cmp_ok(SQL_CHAR , '==', 1, '... testing sql_type');
|
||||
cmp_ok(SQL_NUMERIC , '==', 2, '... testing sql_type');
|
||||
cmp_ok(SQL_DECIMAL , '==', 3, '... testing sql_type');
|
||||
cmp_ok(SQL_INTEGER , '==', 4, '... testing sql_type');
|
||||
cmp_ok(SQL_SMALLINT , '==', 5, '... testing sql_type');
|
||||
cmp_ok(SQL_FLOAT , '==', 6, '... testing sql_type');
|
||||
cmp_ok(SQL_REAL , '==', 7, '... testing sql_type');
|
||||
cmp_ok(SQL_DOUBLE , '==', 8, '... testing sql_type');
|
||||
cmp_ok(SQL_DATETIME , '==', 9, '... testing sql_type');
|
||||
cmp_ok(SQL_DATE , '==', 9, '... testing sql_type');
|
||||
cmp_ok(SQL_INTERVAL , '==', 10, '... testing sql_type');
|
||||
cmp_ok(SQL_TIME , '==', 10, '... testing sql_type');
|
||||
cmp_ok(SQL_TIMESTAMP , '==', 11, '... testing sql_type');
|
||||
cmp_ok(SQL_VARCHAR , '==', 12, '... testing sql_type');
|
||||
cmp_ok(SQL_BOOLEAN , '==', 16, '... testing sql_type');
|
||||
cmp_ok(SQL_UDT , '==', 17, '... testing sql_type');
|
||||
cmp_ok(SQL_UDT_LOCATOR , '==', 18, '... testing sql_type');
|
||||
cmp_ok(SQL_ROW , '==', 19, '... testing sql_type');
|
||||
cmp_ok(SQL_REF , '==', 20, '... testing sql_type');
|
||||
cmp_ok(SQL_BLOB , '==', 30, '... testing sql_type');
|
||||
cmp_ok(SQL_BLOB_LOCATOR , '==', 31, '... testing sql_type');
|
||||
cmp_ok(SQL_CLOB , '==', 40, '... testing sql_type');
|
||||
cmp_ok(SQL_CLOB_LOCATOR , '==', 41, '... testing sql_type');
|
||||
cmp_ok(SQL_ARRAY , '==', 50, '... testing sql_type');
|
||||
cmp_ok(SQL_ARRAY_LOCATOR , '==', 51, '... testing sql_type');
|
||||
cmp_ok(SQL_MULTISET , '==', 55, '... testing sql_type');
|
||||
cmp_ok(SQL_MULTISET_LOCATOR , '==', 56, '... testing sql_type');
|
||||
cmp_ok(SQL_TYPE_DATE , '==', 91, '... testing sql_type');
|
||||
cmp_ok(SQL_TYPE_TIME , '==', 92, '... testing sql_type');
|
||||
cmp_ok(SQL_TYPE_TIMESTAMP , '==', 93, '... testing sql_type');
|
||||
cmp_ok(SQL_TYPE_TIME_WITH_TIMEZONE , '==', 94, '... testing sql_type');
|
||||
cmp_ok(SQL_TYPE_TIMESTAMP_WITH_TIMEZONE , '==', 95, '... testing sql_type');
|
||||
cmp_ok(SQL_INTERVAL_YEAR , '==', 101, '... testing sql_type');
|
||||
cmp_ok(SQL_INTERVAL_MONTH , '==', 102, '... testing sql_type');
|
||||
cmp_ok(SQL_INTERVAL_DAY , '==', 103, '... testing sql_type');
|
||||
cmp_ok(SQL_INTERVAL_HOUR , '==', 104, '... testing sql_type');
|
||||
cmp_ok(SQL_INTERVAL_MINUTE , '==', 105, '... testing sql_type');
|
||||
cmp_ok(SQL_INTERVAL_SECOND , '==', 106, '... testing sql_type');
|
||||
cmp_ok(SQL_INTERVAL_YEAR_TO_MONTH , '==', 107, '... testing sql_type');
|
||||
cmp_ok(SQL_INTERVAL_DAY_TO_HOUR , '==', 108, '... testing sql_type');
|
||||
cmp_ok(SQL_INTERVAL_DAY_TO_MINUTE , '==', 109, '... testing sql_type');
|
||||
cmp_ok(SQL_INTERVAL_DAY_TO_SECOND , '==', 110, '... testing sql_type');
|
||||
cmp_ok(SQL_INTERVAL_HOUR_TO_MINUTE , '==', 111, '... testing sql_type');
|
||||
cmp_ok(SQL_INTERVAL_HOUR_TO_SECOND , '==', 112, '... testing sql_type');
|
||||
cmp_ok(SQL_INTERVAL_MINUTE_TO_SECOND , '==', 113, '... testing sql_type');
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
## testing the :sql_cursor_types exports
|
||||
|
||||
cmp_ok(SQL_CURSOR_FORWARD_ONLY, '==', 0, '... testing sql_cursor_types');
|
||||
cmp_ok(SQL_CURSOR_KEYSET_DRIVEN, '==', 1, '... testing sql_cursor_types');
|
||||
cmp_ok(SQL_CURSOR_DYNAMIC, '==', 2, '... testing sql_cursor_types');
|
||||
cmp_ok(SQL_CURSOR_STATIC, '==', 3, '... testing sql_cursor_types');
|
||||
cmp_ok(SQL_CURSOR_TYPE_DEFAULT, '==', 0, '... testing sql_cursor_types');
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
## test the :util exports
|
||||
|
||||
## testing looks_like_number
|
||||
|
||||
my @is_num = looks_like_number(undef, "", "foo", 1, ".", 2, "2");
|
||||
|
||||
ok(!defined $is_num[0], '... looks_like_number : undef -> undef');
|
||||
ok(!defined $is_num[1], '... looks_like_number : "" -> undef (eg "don\'t know")');
|
||||
ok( defined $is_num[2], '... looks_like_number : "foo" -> defined false');
|
||||
ok( !$is_num[2], '... looks_like_number : "foo" -> defined false');
|
||||
ok( $is_num[3], '... looks_like_number : 1 -> true');
|
||||
ok( !$is_num[4], '... looks_like_number : "." -> false');
|
||||
ok( $is_num[5], '... looks_like_number : 2 -> true');
|
||||
ok( $is_num[6], '... looks_like_number : "2" -> true');
|
||||
|
||||
## testing neat
|
||||
|
||||
cmp_ok($DBI::neat_maxlen, '==', 1000, "... $DBI::neat_maxlen initial state is 1000");
|
||||
|
||||
is(neat(1 + 1), "2", '... neat : 1 + 1 -> "2"');
|
||||
is(neat("2"), "'2'", '... neat : 2 -> "\'2\'"');
|
||||
is(neat(undef), "undef", '... neat : undef -> "undef"');
|
||||
|
||||
## testing neat_list
|
||||
|
||||
is(neat_list([ 1 + 1, "2", undef, "foobarbaz"], 8, "|"), "2|'2'|undef|'foo...'", '... test array argument w/separator and maxlen');
|
||||
is(neat_list([ 1 + 1, "2", undef, "foobarbaz"]), "2, '2', undef, 'foobarbaz'", '... test array argument w/out separator or maxlen');
|
||||
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
## testing DBI functions
|
||||
|
||||
## test DBI->internal
|
||||
|
||||
my $switch = DBI->internal;
|
||||
|
||||
isa_ok($switch, 'DBI::dr');
|
||||
|
||||
## checking attributes of $switch
|
||||
|
||||
# NOTE:
|
||||
# check to see if this covers all the attributes or not
|
||||
|
||||
# TO DO:
|
||||
# these three can be improved
|
||||
$switch->debug(0);
|
||||
pass('... test debug');
|
||||
$switch->{DebugDispatch} = 0; # handled by Switch
|
||||
pass('... test DebugDispatch');
|
||||
$switch->{Warn} = 1; # handled by DBI core
|
||||
pass('... test Warn');
|
||||
|
||||
like($switch->{'Attribution'}, qr/DBI.*? by Tim Bunce/, '... this should say Tim Bunce');
|
||||
|
||||
# is this being presumptuous?
|
||||
is($switch->{'Version'}, $DBI::VERSION, '... the version should match DBI version');
|
||||
|
||||
cmp_ok(($switch->{private_test1} = 1), '==', 1, '... this should work and return 1');
|
||||
cmp_ok($switch->{private_test1}, '==', 1, '... this should equal 1');
|
||||
|
||||
is($switch->{CachedKids}, undef, '... CachedKids should be undef initially');
|
||||
my $cache = {};
|
||||
$switch->{CachedKids} = $cache;
|
||||
is($switch->{CachedKids}, $cache, '... CachedKids should be our ref');
|
||||
|
||||
cmp_ok($switch->{Kids}, '==', 0, '... this should be zero');
|
||||
cmp_ok($switch->{ActiveKids}, '==', 0, '... this should be zero');
|
||||
|
||||
ok($switch->{Active}, '... Active flag is true');
|
||||
|
||||
# test attribute warnings
|
||||
{
|
||||
my $warn = "";
|
||||
local $SIG{__WARN__} = sub { $warn .= "@_" };
|
||||
$switch->{FooBarUnknown} = 1;
|
||||
like($warn, qr/Can't set.*FooBarUnknown/, '... we should get a warning here');
|
||||
|
||||
$warn = "";
|
||||
$_ = $switch->{BarFooUnknown};
|
||||
like($warn, qr/Can't get.*BarFooUnknown/, '... we should get a warning here');
|
||||
|
||||
$warn = "";
|
||||
my $dummy = $switch->{$_} for qw(private_foo dbd_foo dbi_foo); # special cases
|
||||
cmp_ok($warn, 'eq', "", '... we should get no warnings here');
|
||||
}
|
||||
|
||||
# is this here for a reason? Are we testing anything?
|
||||
|
||||
$switch->trace_msg("Test \$h->trace_msg text.\n", 1);
|
||||
DBI->trace_msg("Test DBI->trace_msg text.\n", 1);
|
||||
|
||||
## testing DBI->available_drivers
|
||||
|
||||
my @drivers = DBI->available_drivers();
|
||||
cmp_ok(scalar(@drivers), '>', 0, '... we at least have one driver installed');
|
||||
|
||||
# NOTE:
|
||||
# we lowercase the interpolated @drivers array
|
||||
# so that our reg-exp will match on VMS & Win32
|
||||
|
||||
like(lc("@drivers"), qr/examplep/, '... we should at least have ExampleP installed');
|
||||
|
||||
# call available_drivers in scalar context
|
||||
|
||||
my $num_drivers = DBI->available_drivers;
|
||||
cmp_ok($num_drivers, '>', 0, '... we should at least have one driver');
|
||||
|
||||
## testing DBI::hash
|
||||
|
||||
cmp_ok(DBI::hash("foo1" ), '==', -1077531989, '... should be -1077531989');
|
||||
cmp_ok(DBI::hash("foo1",0), '==', -1077531989, '... should be -1077531989');
|
||||
cmp_ok(DBI::hash("foo2",0), '==', -1077531990, '... should be -1077531990');
|
||||
SKIP: {
|
||||
skip("Math::BigInt < 1.56",2)
|
||||
if $DBI::PurePerl && !eval { require Math::BigInt; require_version Math::BigInt 1.56 };
|
||||
skip("Math::BigInt $Math::BigInt::VERSION broken",2)
|
||||
if $DBI::PurePerl && $Math::BigInt::VERSION =~ /^1\.8[45]/;
|
||||
my $bigint_vers = $Math::BigInt::VERSION || "";
|
||||
if (!$DBI::PurePerl) {
|
||||
cmp_ok(DBI::hash("foo1",1), '==', -1263462440);
|
||||
cmp_ok(DBI::hash("foo2",1), '==', -1263462437);
|
||||
}
|
||||
else {
|
||||
# for PurePerl we use Math::BigInt but that's often caused test failures that
|
||||
# aren't DBI's fault. So we just warn (via a skip) if it's not working right.
|
||||
skip("Seems like your Math::BigInt $Math::BigInt::VERSION has a bug",2)
|
||||
unless (DBI::hash("foo1X",1) == -1263462440) && (DBI::hash("foo2",1) == -1263462437);
|
||||
ok(1, "Math::BigInt $Math::BigInt::VERSION worked okay");
|
||||
ok(1);
|
||||
}
|
||||
}
|
||||
|
||||
is(data_string_desc(""), "UTF8 off, ASCII, 0 characters 0 bytes");
|
||||
is(data_string_desc(42), "UTF8 off, ASCII, 2 characters 2 bytes");
|
||||
is(data_string_desc("foo"), "UTF8 off, ASCII, 3 characters 3 bytes");
|
||||
is(data_string_desc(undef), "UTF8 off, undef");
|
||||
is(data_string_desc("bar\x{263a}"), "UTF8 on, non-ASCII, 4 characters 6 bytes");
|
||||
is(data_string_desc("\xEA"), "UTF8 off, non-ASCII, 1 characters 1 bytes");
|
||||
|
||||
is(data_string_diff( "", ""), "");
|
||||
is(data_string_diff( "",undef), "String b is undef, string a has 0 characters");
|
||||
is(data_string_diff(undef,undef), "");
|
||||
is(data_string_diff("aaa","aaa"), "");
|
||||
|
||||
is(data_string_diff("aaa","aba"), "Strings differ at index 1: a[1]=a, b[1]=b");
|
||||
is(data_string_diff("aba","aaa"), "Strings differ at index 1: a[1]=b, b[1]=a");
|
||||
is(data_string_diff("aa" ,"aaa"), "String a truncated after 2 characters");
|
||||
is(data_string_diff("aaa","aa" ), "String b truncated after 2 characters");
|
||||
|
||||
is(data_diff( "", ""), "");
|
||||
is(data_diff(undef,undef), "");
|
||||
is(data_diff("aaa","aaa"), "");
|
||||
|
||||
is(data_diff( "",undef),
|
||||
join "","a: UTF8 off, ASCII, 0 characters 0 bytes\n",
|
||||
"b: UTF8 off, undef\n",
|
||||
"String b is undef, string a has 0 characters\n");
|
||||
is(data_diff("aaa","aba"),
|
||||
join "","a: UTF8 off, ASCII, 3 characters 3 bytes\n",
|
||||
"b: UTF8 off, ASCII, 3 characters 3 bytes\n",
|
||||
"Strings differ at index 1: a[1]=a, b[1]=b\n");
|
||||
is(data_diff(pack("C",0xEA), pack("U",0xEA)),
|
||||
join "", "a: UTF8 off, non-ASCII, 1 characters 1 bytes\n",
|
||||
"b: UTF8 on, non-ASCII, 1 characters 2 bytes\n",
|
||||
"Strings contain the same sequence of characters\n");
|
||||
is(data_diff(pack("C",0xEA), pack("U",0xEA), 1), ""); # no logical difference
|
||||
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
# restrict this test to just developers
|
||||
|
||||
SKIP: {
|
||||
skip 'developer tests', 4 unless -d ".svn" || -d ".git";
|
||||
|
||||
if ($^O eq "MSWin32" && eval { require Win32API::File }) {
|
||||
Win32API::File::SetErrorMode(Win32API::File::SEM_FAILCRITICALERRORS());
|
||||
}
|
||||
|
||||
print "Test DBI->installed_versions (for @drivers)\n";
|
||||
print "(If one of those drivers, or the configuration for it, is bad\n";
|
||||
print "then these tests can kill or freeze the process here. That's not the DBI's fault.)\n";
|
||||
$SIG{ALRM} = sub {
|
||||
die "Test aborted because a driver (one of: @drivers) hung while loading"
|
||||
." (almost certainly NOT a DBI problem)";
|
||||
};
|
||||
alarm(20);
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
## test installed_versions
|
||||
|
||||
# scalar context
|
||||
my $installed_versions = DBI->installed_versions;
|
||||
|
||||
is(ref($installed_versions), 'HASH', '... we got a hash of installed versions');
|
||||
cmp_ok(scalar(keys(%{$installed_versions})), '>=', 1, '... make sure we have at least one');
|
||||
|
||||
# list context
|
||||
my @installed_drivers = DBI->installed_versions;
|
||||
|
||||
cmp_ok(scalar(@installed_drivers), '>=', 1, '... make sure we got at least one');
|
||||
like("@installed_drivers", qr/Sponge/, '... make sure at least one of them is DBD::Sponge');
|
||||
}
|
||||
|
||||
## testing dbi_debug
|
||||
|
||||
cmp_ok($DBI::dbi_debug, '==', 0, "... DBI::dbi_debug's initial state is 0");
|
||||
|
||||
SKIP: {
|
||||
my $null = File::Spec->devnull();
|
||||
skip "cannot find : $null", 2 unless ($^O eq "MSWin32" || -e $null);
|
||||
|
||||
DBI->trace(15,$null);
|
||||
cmp_ok($DBI::dbi_debug, '==', 15, "... DBI::dbi_debug is 15");
|
||||
DBI->trace(0, undef);
|
||||
cmp_ok($DBI::dbi_debug, '==', 0, "... DBI::dbi_debug is 0");
|
||||
}
|
||||
|
||||
1;
|
|
@@ -0,0 +1,264 @@
|
|||
#!perl -w
|
||||
# vim:sw=4:ts=8:et
|
||||
$|=1;
|
||||
|
||||
use strict;
|
||||
|
||||
use Test::More tests => 54;
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
## 02dbidrv.t - ...
|
||||
## ----------------------------------------------------------------------------
|
||||
# This test creates a Test Driver (DBD::Test) and then exercises it.
|
||||
# NOTE:
|
||||
# There are also a number of tests that are embedded within the actual
|
||||
# driver code itself
|
||||
## ----------------------------------------------------------------------------
|
||||
|
||||
## load DBI
|
||||
|
||||
BEGIN {
|
||||
use_ok('DBI');
|
||||
}
|
||||
|
||||
## DBI::_new_drh had an internal limit on a driver class name and crashed.
|
||||
SKIP: {
|
||||
Test::More::skip "running DBI::PurePerl", 1 if $DBI::PurePerl;
|
||||
eval {
|
||||
DBI::_new_drh('DBD::Test::OverLong' . 'x' x 300,
|
||||
{ Name => 'Test', Version => 'Test', }, 42);
|
||||
};
|
||||
like($@, qr/unknown _mem package/, 'Overlong DBD class name is processed');
|
||||
}
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
## create a Test Driver (DBD::Test)
|
||||
|
||||
## main Test Driver Package
|
||||
{
|
||||
package DBD::Test;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
my $drh = undef;
|
||||
|
||||
sub driver {
|
||||
return $drh if $drh;
|
||||
|
||||
Test::More::pass('... DBD::Test->driver called to get new Driver handle');
|
||||
|
||||
my($class, $attr) = @_;
|
||||
$class = "${class}::dr";
|
||||
($drh) = DBI::_new_drh($class, {
|
||||
Name => 'Test',
|
||||
Version => '$Revision: 11.11 $',
|
||||
},
|
||||
77 # 'implementors data'
|
||||
);
|
||||
|
||||
Test::More::ok($drh, "... new Driver handle ($drh) created successfully");
|
||||
Test::More::isa_ok($drh, 'DBI::dr');
|
||||
|
||||
return $drh;
|
||||
}
|
||||
}
|
||||
|
||||
## Test Driver
|
||||
{
|
||||
package DBD::Test::dr;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
$DBD::Test::dr::imp_data_size = 0;
|
||||
|
||||
Test::More::cmp_ok($DBD::Test::dr::imp_data_size, '==', 0, '... check DBD::Test::dr::imp_data_size to avoid typo');
|
||||
|
||||
sub DESTROY { undef }
|
||||
|
||||
sub data_sources {
|
||||
my ($h) = @_;
|
||||
|
||||
Test::More::ok($h, '... Driver object passed to data_sources');
|
||||
Test::More::isa_ok($h, 'DBI::dr');
|
||||
Test::More::ok(!tied $h, '... Driver object is not tied');
|
||||
|
||||
return ("dbi:Test:foo", "dbi:Test:bar");
|
||||
}
|
||||
}
|
||||
|
||||
## Test db package
|
||||
{
|
||||
package DBD::Test::db;
|
||||
|
||||
use strict;
|
||||
|
||||
$DBD::Test::db::imp_data_size = 0;
|
||||
|
||||
Test::More::cmp_ok($DBD::Test::db::imp_data_size, '==', 0, '... check DBD::Test::db::imp_data_size to avoid typo');
|
||||
|
||||
sub do {
|
||||
my $h = shift;
|
||||
|
||||
Test::More::ok($h, '... Database object passed to do');
|
||||
Test::More::isa_ok($h, 'DBI::db');
|
||||
Test::More::ok(!tied $h, '... Database object is not tied');
|
||||
|
||||
my $drh_i = $h->{Driver};
|
||||
|
||||
Test::More::ok($drh_i, '... got Driver object from Database object with Driver attribute');
|
||||
Test::More::isa_ok($drh_i, "DBI::dr");
|
||||
Test::More::ok(!tied %{$drh_i}, '... Driver object is not tied');
|
||||
|
||||
my $drh_o = $h->FETCH('Driver');
|
||||
|
||||
Test::More::ok($drh_o, '... got Driver object from Database object by FETCH-ing Driver attribute');
|
||||
Test::More::isa_ok($drh_o, "DBI::dr");
|
||||
SKIP: {
|
||||
Test::More::skip "running DBI::PurePerl", 1 if $DBI::PurePerl;
|
||||
Test::More::ok(tied %{$drh_o}, '... Driver object is tied');
|
||||
}
|
||||
|
||||
# return this to make our test pass
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub data_sources {
|
||||
my ($dbh, $attr) = @_;
|
||||
my @ds = $dbh->SUPER::data_sources($attr);
|
||||
|
||||
Test::More::is_deeply((
|
||||
\@ds,
|
||||
[ 'dbi:Test:foo', 'dbi:Test:bar' ]
|
||||
),
|
||||
'... checking fetched datasources from Driver'
|
||||
);
|
||||
|
||||
push @ds, "dbi:Test:baz";
|
||||
return @ds;
|
||||
}
|
||||
|
||||
sub disconnect {
|
||||
shift->STORE(Active => 0);
|
||||
}
|
||||
}
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
## test the Driver (DBD::Test)
|
||||
|
||||
$INC{'DBD/Test.pm'} = 'dummy'; # required to fool DBI->install_driver()
|
||||
|
||||
# Note that install_driver should *not* normally be called directly.
|
||||
# This test does so only because it's a test of install_driver!
|
||||
|
||||
my $drh = DBI->install_driver('Test');
|
||||
|
||||
ok($drh, '... got a Test Driver object back from DBI->install_driver');
|
||||
isa_ok($drh, 'DBI::dr');
|
||||
|
||||
cmp_ok(DBI::_get_imp_data($drh), '==', 77, '... checking the DBI::_get_imp_data function');
|
||||
|
||||
my @ds1 = DBI->data_sources("Test");
|
||||
is_deeply((
|
||||
[ @ds1 ],
|
||||
[ 'dbi:Test:foo', 'dbi:Test:bar' ]
|
||||
), '... got correct datasources from DBI->data_sources("Test")'
|
||||
);
|
||||
|
||||
SKIP: {
|
||||
skip "Kids attribute not supported under DBI::PurePerl", 1 if $DBI::PurePerl;
|
||||
|
||||
cmp_ok($drh->{Kids}, '==', 0, '... this Driver does not yet have any Kids');
|
||||
}
|
||||
|
||||
# create scope to test $dbh DESTROY behaviour
|
||||
do {
|
||||
|
||||
my $dbh = $drh->connect;
|
||||
|
||||
ok($dbh, '... got a database handle from calling $drh->connect');
|
||||
isa_ok($dbh, 'DBI::db');
|
||||
|
||||
SKIP: {
|
||||
skip "Kids attribute not supported under DBI::PurePerl", 1 if $DBI::PurePerl;
|
||||
|
||||
cmp_ok($drh->{Kids}, '==', 1, '... this Driver now has one Kid');
|
||||
}
|
||||
|
||||
my @ds2 = $dbh->data_sources();
|
||||
is_deeply((
|
||||
[ @ds2 ],
|
||||
[ 'dbi:Test:foo', 'dbi:Test:bar', 'dbi:Test:baz' ]
|
||||
), '... got correct datasources from $dbh->data_sources()'
|
||||
);
|
||||
|
||||
ok($dbh->do('dummy'), '... this will trigger more driver internal tests above in DBD::Test::db');
|
||||
|
||||
$dbh->disconnect;
|
||||
|
||||
$drh->set_err("41", "foo 41 drh");
|
||||
cmp_ok($drh->err, '==', 41, '... checking Driver handle err set with set_err method');
|
||||
$dbh->set_err("42", "foo 42 dbh");
|
||||
cmp_ok($dbh->err, '==', 42, '... checking Database handle err set with set_err method');
|
||||
cmp_ok($drh->err, '==', 41, '... checking Database handle err set with Driver handle set_err method');
|
||||
|
||||
};
|
||||
|
||||
SKIP: {
|
||||
skip "Kids attribute not supported under DBI::PurePerl", 1 if $DBI::PurePerl;
|
||||
|
||||
cmp_ok($drh->{Kids}, '==', 0, '... this Driver does not yet have any Kids')
|
||||
or $drh->dump_handle("bad Kids",3);
|
||||
}
|
||||
|
||||
# copied up to drh from dbh when dbh was DESTROYd
|
||||
cmp_ok($drh->err, '==', 42, '... $dbh->DESTROY should set $drh->err to 42');
|
||||
|
||||
$drh->set_err("99", "foo");
|
||||
cmp_ok($DBI::err, '==', 99, '... checking $DBI::err set with Driver handle set_err method');
|
||||
is($DBI::errstr, "foo 42 dbh [err was 42 now 99]\nfoo", '... checking $DBI::errstr');
|
||||
|
||||
$drh->default_user("",""); # just to reset err etc
|
||||
$drh->set_err(1, "errmsg", "00000");
|
||||
is($DBI::state, "", '... checking $DBI::state');
|
||||
|
||||
$drh->set_err(1, "test error 1");
|
||||
is($DBI::state, 'S1000', '... checking $DBI::state');
|
||||
|
||||
$drh->set_err(2, "test error 2", "IM999");
|
||||
is($DBI::state, 'IM999', '... checking $DBI::state');
|
||||
|
||||
SKIP: {
|
||||
skip "using DBI::PurePerl", 1 if $DBI::PurePerl;
|
||||
eval {
|
||||
$DBI::rows = 1
|
||||
};
|
||||
like($@, qr/Can't modify/, '... trying to assign to $DBI::rows should throw an exception'); #'
|
||||
}
|
||||
|
||||
is($drh->{FetchHashKeyName}, 'NAME', '... FetchHashKeyName is NAME');
|
||||
$drh->{FetchHashKeyName} = 'NAME_lc';
|
||||
is($drh->{FetchHashKeyName}, 'NAME_lc', '... FetchHashKeyName is now changed to NAME_lc');
|
||||
|
||||
ok(!$drh->disconnect_all, '... calling $drh->disconnect_all (not implemented but will fail silently)');
|
||||
|
||||
ok defined $drh->dbixs_revision, 'has dbixs_revision';
|
||||
ok($drh->dbixs_revision =~ m/^\d+$/, 'has integer dbixs_revision');
|
||||
|
||||
SKIP: {
|
||||
skip "using DBI::PurePerl", 5 if $DBI::PurePerl;
|
||||
my $can = $drh->can('FETCH');
|
||||
|
||||
ok($can, '... $drh can FETCH');
|
||||
is(ref($can), "CODE", '... and it returned a proper CODE ref');
|
||||
|
||||
my $name = $can->($drh, "Name");
|
||||
|
||||
ok($name, '... used FETCH returned from can to fetch the Name attribute');
|
||||
is($name, "Test", '... the Name attribute is equal to Test');
|
||||
|
||||
ok(!$drh->can('disconnect_all'), '... $drh->can reports disconnect_all as not implemented');
|
||||
}
|
||||
|
||||
1;
|
|
@ -0,0 +1,410 @@
|
|||
#!perl -w
|
||||
$|=1;
|
||||
|
||||
use strict;
|
||||
|
||||
use Test::More tests => 137;
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
## 03handle.t - tests handles
|
||||
## ----------------------------------------------------------------------------
|
||||
# This set of tests exercises the different handles; Driver, Database and
|
||||
# Statement in various ways, in particular in their interactions with one
|
||||
# another
|
||||
## ----------------------------------------------------------------------------
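# For orientation, an illustrative sketch (comments only, never executed, so
# the test plan is unchanged) of the handle hierarchy exercised below, using
# the ExampleP driver that ships with DBI:
#
#   my $drh = DBI->install_driver("ExampleP");         # DBI::dr
#   my $dbh = DBI->connect("dbi:ExampleP:", "", "");   # DBI::db; $drh->{Kids} == 1
#   my $sth = $dbh->prepare("select name from ?");     # DBI::st; $dbh->{Kids} == 1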
|
||||
|
||||
BEGIN {
|
||||
use_ok( 'DBI' );
|
||||
}
|
||||
|
||||
# installed drivers should start empty
|
||||
my %drivers = DBI->installed_drivers();
|
||||
is(scalar keys %drivers, 0);
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
# get the Driver handle
|
||||
|
||||
my $driver = "ExampleP";
|
||||
|
||||
my $drh = DBI->install_driver($driver);
|
||||
isa_ok( $drh, 'DBI::dr' );
|
||||
|
||||
SKIP: {
|
||||
skip "Kids attribute not supported under DBI::PurePerl", 1 if $DBI::PurePerl;
|
||||
|
||||
cmp_ok($drh->{Kids}, '==', 0, '... this Driver does not yet have any Kids');
|
||||
}
|
||||
|
||||
# now the driver should be registered
|
||||
%drivers = DBI->installed_drivers();
|
||||
is(scalar keys %drivers, 1);
|
||||
ok(exists $drivers{ExampleP});
|
||||
ok($drivers{ExampleP}->isa('DBI::dr'));
|
||||
|
||||
my $using_dbd_gofer = ($ENV{DBI_AUTOPROXY}||'') =~ /^dbi:Gofer.*transport=/i;
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
# do database handle tests inside do BLOCK to capture scope
|
||||
|
||||
do {
|
||||
my $dbh = DBI->connect("dbi:$driver:", '', '');
|
||||
isa_ok($dbh, 'DBI::db');
|
||||
|
||||
my $drh = $dbh->{Driver}; # (re)get drh here so tests can work using_dbd_gofer
|
||||
|
||||
SKIP: {
|
||||
skip "Kids and ActiveKids attributes not supported under DBI::PurePerl", 2 if $DBI::PurePerl;
|
||||
|
||||
cmp_ok($drh->{Kids}, '==', 1, '... our Driver has one Kid');
|
||||
cmp_ok($drh->{ActiveKids}, '==', 1, '... our Driver has one ActiveKid');
|
||||
}
|
||||
|
||||
my $sql = "select name from ?";
|
||||
|
||||
my $sth1 = $dbh->prepare_cached($sql);
|
||||
isa_ok($sth1, 'DBI::st');
|
||||
ok($sth1->execute("."), '... execute ran successfully');
|
||||
|
||||
my $ck = $dbh->{CachedKids};
|
||||
is(ref($ck), "HASH", '... we got the CachedKids hash');
|
||||
|
||||
cmp_ok(scalar(keys(%{$ck})), '==', 1, '... there is one CachedKid');
|
||||
ok(eq_set(
|
||||
[ values %{$ck} ],
|
||||
[ $sth1 ]
|
||||
),
|
||||
'... our statement handle should be in the CachedKids');
|
||||
|
||||
ok($sth1->{Active}, '... our first statement is Active');
|
||||
|
||||
{
|
||||
my $warn = 0; # use this to check that we are warned
|
||||
local $SIG{__WARN__} = sub { ++$warn if $_[0] =~ /still active/i };
|
||||
|
||||
my $sth2 = $dbh->prepare_cached($sql);
|
||||
isa_ok($sth2, 'DBI::st');
|
||||
|
||||
is($sth1, $sth2, '... prepare_cached returned the same statement handle');
|
||||
cmp_ok($warn,'==', 1, '... we got warned about our first statement handle being still active');
|
||||
|
||||
ok(!$sth1->{Active}, '... our first statement is no longer Active since we re-prepared it');
|
||||
|
||||
my $sth3 = $dbh->prepare_cached($sql, { foo => 1 });
|
||||
isa_ok($sth3, 'DBI::st');
|
||||
|
||||
isnt($sth1, $sth3, '... prepare_cached returned a different statement handle now');
|
||||
cmp_ok(scalar(keys(%{$ck})), '==', 2, '... there are two CachedKids');
|
||||
ok(eq_set(
|
||||
[ values %{$ck} ],
|
||||
[ $sth1, $sth3 ]
|
||||
),
|
||||
'... both statement handles should be in the CachedKids');
|
||||
|
||||
ok($sth1->execute("."), '... executing first statement handle again');
|
||||
ok($sth1->{Active}, '... first statement handle is now active again');
|
||||
|
||||
my $sth4 = $dbh->prepare_cached($sql, undef, 3);
|
||||
isa_ok($sth4, 'DBI::st');
|
||||
|
||||
isnt($sth1, $sth4, '... our fourth statement handle is not the same as our first');
|
||||
ok($sth1->{Active}, '... first statement handle is still active');
|
||||
|
||||
cmp_ok(scalar(keys(%{$ck})), '==', 2, '... there are two CachedKids');
|
||||
ok(eq_set(
|
||||
[ values %{$ck} ],
|
||||
[ $sth2, $sth4 ]
|
||||
),
|
||||
'... second and fourth statement handles should be in the CachedKids');
|
||||
|
||||
$sth1->finish;
|
||||
ok(!$sth1->{Active}, '... first statement handle is no longer active');
|
||||
|
||||
ok($sth4->execute("."), '... fourth statement handle executed properly');
|
||||
ok($sth4->{Active}, '... fourth statement handle is Active');
|
||||
|
||||
my $sth5 = $dbh->prepare_cached($sql, undef, 1);
|
||||
isa_ok($sth5, 'DBI::st');
|
||||
|
||||
cmp_ok($warn, '==', 1, '... we still only got one warning');
|
||||
|
||||
is($sth4, $sth5, '... fourth statement handle and fifth one match');
|
||||
ok(!$sth4->{Active}, '... fourth statement handle is not Active');
|
||||
ok(!$sth5->{Active}, '... fifth statement handle is not Active (it should not be, since it is the same handle as the fourth)');
|
||||
|
||||
cmp_ok(scalar(keys(%{$ck})), '==', 2, '... there are two CachedKids');
|
||||
ok(eq_set(
|
||||
[ values %{$ck} ],
|
||||
[ $sth2, $sth5 ]
|
||||
),
|
||||
'... second and fourth/fifth statement handles should be in the CachedKids');
|
||||
}
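# For reference, the documented meaning of the third prepare_cached()
# argument used above: 0 (the default) warns and finishes a still-Active
# cached handle before returning it, 1 finishes it without warning, 2 skips
# the Active check entirely, and 3 leaves the active handle alone and caches
# a freshly prepared handle in its place.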
|
||||
|
||||
SKIP: {
|
||||
skip "swap_inner_handle() not supported under DBI::PurePerl", 23 if $DBI::PurePerl;
|
||||
|
||||
my $sth6 = $dbh->prepare($sql);
|
||||
$sth6->execute(".");
|
||||
my $sth1_driver_name = $sth1->{Database}{Driver}{Name};
|
||||
|
||||
ok( $sth6->{Active}, '... sixth statement handle is active');
|
||||
ok(!$sth1->{Active}, '... first statement handle is not active');
|
||||
|
||||
ok($sth1->swap_inner_handle($sth6), '... first statement handle becomes the sixth');
|
||||
ok(!$sth6->{Active}, '... sixth statement handle is now not active');
|
||||
ok( $sth1->{Active}, '... first statement handle is now active again');
|
||||
|
||||
ok($sth1->swap_inner_handle($sth6), '... first statement handle becomes the sixth');
|
||||
ok( $sth6->{Active}, '... sixth statement handle is active');
|
||||
ok(!$sth1->{Active}, '... first statement handle is not active');
|
||||
|
||||
ok($sth1->swap_inner_handle($sth6), '... first statement handle becomes the sixth');
|
||||
ok(!$sth6->{Active}, '... sixth statement handle is now not active');
|
||||
ok( $sth1->{Active}, '... first statement handle is now active again');
|
||||
|
||||
$sth1->{PrintError} = 0;
|
||||
ok(!$sth1->swap_inner_handle($dbh), '... can not swap a sth with a dbh');
|
||||
cmp_ok( $sth1->errstr, 'eq', "Can't swap_inner_handle between sth and dbh");
|
||||
|
||||
ok($sth1->swap_inner_handle($sth6), '... first statement handle becomes the sixth');
|
||||
ok( $sth6->{Active}, '... sixth statement handle is active');
|
||||
ok(!$sth1->{Active}, '... first statement handle is not active');
|
||||
|
||||
$sth6->finish;
|
||||
|
||||
ok(my $dbh_nullp = DBI->connect("dbi:NullP:", undef, undef, { go_bypass => 1 }));
|
||||
ok(my $sth7 = $dbh_nullp->prepare(""));
|
||||
|
||||
$sth1->{PrintError} = 0;
|
||||
ok(!$sth1->swap_inner_handle($sth7), "... can't swap_inner_handle with handle from different parent");
|
||||
cmp_ok( $sth1->errstr, 'eq', "Can't swap_inner_handle with handle from different parent");
|
||||
|
||||
cmp_ok( $sth1->{Database}{Driver}{Name}, 'eq', $sth1_driver_name );
|
||||
ok( $sth1->swap_inner_handle($sth7,1), "... can swap to different parent if forced");
|
||||
cmp_ok( $sth1->{Database}{Driver}{Name}, 'eq', "NullP" );
|
||||
|
||||
$dbh_nullp->disconnect;
|
||||
}
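# For reference: swap_inner_handle(), exercised above, exchanges the inner
# handles of two outer handles, so each outer reference ends up driving the
# other's underlying state; by default it refuses to swap handles of
# different types or with different parents unless the second argument
# ($allow_reparent in the DBI docs) is true.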
|
||||
|
||||
ok( $dbh->ping, 'ping should be true before disconnect');
|
||||
$dbh->disconnect;
|
||||
$dbh->{PrintError} = 0; # silence 'not connected' warning
|
||||
ok( !$dbh->ping, 'ping should be false after disconnect');
|
||||
|
||||
SKIP: {
|
||||
skip "Kids and ActiveKids attributes not supported under DBI::PurePerl", 2 if $DBI::PurePerl;
|
||||
|
||||
cmp_ok($drh->{Kids}, '==', 1, '... our Driver has one Kid after disconnect');
|
||||
cmp_ok($drh->{ActiveKids}, '==', 0, '... our Driver has no ActiveKids after disconnect');
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
if ($using_dbd_gofer) {
|
||||
$drh->{CachedKids} = {};
|
||||
}
|
||||
|
||||
# make sure our driver has no more kids after this test
|
||||
# NOTE:
|
||||
# this also assures us that the next test has an empty slate as well
|
||||
SKIP: {
|
||||
skip "Kids attribute not supported under DBI::PurePerl", 1 if $DBI::PurePerl;
|
||||
|
||||
cmp_ok($drh->{Kids}, '==', 0, "... our $drh->{Name} driver should have 0 Kids after dbh was destoryed");
|
||||
}
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
# handle reference leak tests
|
||||
|
||||
# NOTE:
|
||||
# this test checks for reference leaks by testing the Kids attribute
|
||||
# which is not supported by DBI::PurePerl, so we just do not run this
|
||||
# for DBI::PurePerl altogether. Even though some of the tests would
|
||||
# pass, it does not make sense because in the end, what is actually
|
||||
# being tested for will give a false positive
|
||||
|
||||
sub work {
|
||||
my (%args) = @_;
|
||||
my $dbh = DBI->connect("dbi:$driver:", '', '');
|
||||
isa_ok( $dbh, 'DBI::db' );
|
||||
|
||||
cmp_ok($drh->{Kids}, '==', 1, '... the Driver should have 1 Kid(s) now');
|
||||
|
||||
if ( $args{Driver} ) {
|
||||
isa_ok( $dbh->{Driver}, 'DBI::dr' );
|
||||
} else {
|
||||
pass( "not testing Driver here" );
|
||||
}
|
||||
|
||||
my $sth = $dbh->prepare_cached("select name from ?");
|
||||
isa_ok( $sth, 'DBI::st' );
|
||||
|
||||
if ( $args{Database} ) {
|
||||
isa_ok( $sth->{Database}, 'DBI::db' );
|
||||
} else {
|
||||
pass( "not testing Database here" );
|
||||
}
|
||||
|
||||
$dbh->disconnect;
|
||||
# both handles should be freed here
|
||||
}
|
||||
|
||||
SKIP: {
|
||||
skip "Kids attribute not supported under DBI::PurePerl", 25 if $DBI::PurePerl;
|
||||
skip "drh Kids not testable under DBD::Gofer", 25 if $using_dbd_gofer;
|
||||
|
||||
foreach my $args (
|
||||
{},
|
||||
{ Driver => 1 },
|
||||
{ Database => 1 },
|
||||
{ Driver => 1, Database => 1 },
|
||||
) {
|
||||
work( %{$args} );
|
||||
cmp_ok($drh->{Kids}, '==', 0, '... the Driver should have no Kids');
|
||||
}
|
||||
|
||||
# make sure we have no kids when we end this
|
||||
cmp_ok($drh->{Kids}, '==', 0, '... the Driver should have no Kids at the end of this test');
|
||||
}
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
# handle take_imp_data test
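# take_imp_data() detaches the driver-internal data from a handle (leaving
# the handle and its children unusable) so the data can be passed to a later
# connect() call via the dbi_imp_data attribute, which is what the block
# below exercises. A minimal sketch of that round trip (guarded so it never
# runs and does not disturb the test plan):
if (0) {
    my $dbh_a    = DBI->connect("dbi:$driver:", '', '');
    my $imp_data = $dbh_a->take_imp_data;   # $dbh_a is now a dead handle
    my $dbh_b    = DBI->connect("dbi:$driver:", '', '',
                                { dbi_imp_data => $imp_data });
}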
|
||||
|
||||
SKIP: {
|
||||
skip "take_imp_data test not supported under DBD::Gofer", 19 if $using_dbd_gofer;
|
||||
|
||||
my $dbh = DBI->connect("dbi:$driver:", '', '');
|
||||
isa_ok($dbh, "DBI::db");
|
||||
my $drh = $dbh->{Driver}; # (re)get drh here so tests can work using_dbd_gofer
|
||||
|
||||
cmp_ok($drh->{Kids}, '==', 1, '... our Driver should have 1 Kid(s) here')
|
||||
unless $DBI::PurePerl && pass();
|
||||
|
||||
$dbh->prepare("select name from ?"); # destroyed at once
|
||||
my $sth2 = $dbh->prepare("select name from ?"); # inactive
|
||||
my $sth3 = $dbh->prepare("select name from ?"); # active:
|
||||
$sth3->execute(".");
|
||||
is $sth3->{Active}, 1;
|
||||
is $dbh->{ActiveKids}, 1
|
||||
unless $DBI::PurePerl && pass();
|
||||
|
||||
my $ChildHandles = $dbh->{ChildHandles};
|
||||
|
||||
skip "take_imp_data test needs weakrefs", 15 if not $ChildHandles;
|
||||
|
||||
ok $ChildHandles, 'we need weakrefs for take_imp_data to work safely with child handles';
|
||||
is @$ChildHandles, 3, 'should have 3 entries (implementation detail)';
|
||||
is grep({ defined } @$ChildHandles), 2, 'should have 2 defined handles';
|
||||
|
||||
my $imp_data = $dbh->take_imp_data;
|
||||
ok($imp_data, '... we got some imp_data to test');
|
||||
# generally length($imp_data) = 112 for 32bit, 116 for 64 bit
|
||||
# (as of DBI 1.37) but it can differ on some platforms
|
||||
# depending on structure packing by the compiler
|
||||
# so we just test that it's something reasonable:
|
||||
cmp_ok(length($imp_data), '>=', 80, '... test that our imp_data is greater than or equal to 80, this is reasonable');
|
||||
|
||||
cmp_ok($drh->{Kids}, '==', 0, '... our Driver should have 0 Kid(s) after calling take_imp_data');
|
||||
|
||||
is ref $sth3, 'DBI::zombie', 'sth should be reblessed';
|
||||
eval { $sth3->finish };
|
||||
like $@, qr/Can't locate object method/;
|
||||
|
||||
{
|
||||
my @warn;
|
||||
local $SIG{__WARN__} = sub { push @warn, $_[0] if $_[0] =~ /after take_imp_data/; print "warn: @_\n"; };
|
||||
|
||||
my $drh = $dbh->{Driver};
|
||||
ok(!defined $drh, '... our Driver should be undefined');
|
||||
|
||||
my $trace_level = $dbh->{TraceLevel};
|
||||
ok(!defined $trace_level, '... our TraceLevel should be undefined');
|
||||
|
||||
ok(!defined $dbh->disconnect, '... disconnect should return undef');
|
||||
|
||||
ok(!defined $dbh->quote(42), '... quote should return undefined');
|
||||
|
||||
cmp_ok(scalar @warn, '==', 4, '... we should have gotten 4 warnings');
|
||||
}
|
||||
|
||||
my $dbh2 = DBI->connect("dbi:$driver:", '', '', { dbi_imp_data => $imp_data });
|
||||
isa_ok($dbh2, "DBI::db");
|
||||
# need a way to test dbi_imp_data has been used
|
||||
|
||||
cmp_ok($drh->{Kids}, '==', 1, '... our Driver should have 1 Kid(s) again')
|
||||
unless $DBI::PurePerl && pass();
|
||||
|
||||
}
|
||||
|
||||
# we need this SKIP block on its own since we are testing the
|
||||
# destruction of objects within the scope of the above SKIP
|
||||
# block
|
||||
SKIP: {
|
||||
skip "Kids attribute not supported under DBI::PurePerl", 1 if $DBI::PurePerl;
|
||||
|
||||
cmp_ok($drh->{Kids}, '==', 0, '... our Driver has no Kids after this test');
|
||||
}
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
# NullP statement handle attributes without execute
|
||||
|
||||
my $driver2 = "NullP";
|
||||
|
||||
my $drh2 = DBI->install_driver($driver2);
|
||||
isa_ok( $drh2, 'DBI::dr' );
|
||||
|
||||
SKIP: {
|
||||
skip "Kids attribute not supported under DBI::PurePerl", 1 if $DBI::PurePerl;
|
||||
|
||||
cmp_ok($drh2->{Kids}, '==', 0, '... our Driver (2) has no Kids before this test');
|
||||
}
|
||||
|
||||
do {
|
||||
my $dbh = DBI->connect("dbi:$driver2:", '', '');
|
||||
isa_ok($dbh, "DBI::db");
|
||||
|
||||
my $sth = $dbh->prepare("foo bar");
|
||||
isa_ok($sth, "DBI::st");
|
||||
|
||||
cmp_ok($sth->{NUM_OF_PARAMS}, '==', 0, '... NUM_OF_PARAMS is 0');
|
||||
is($sth->{NUM_OF_FIELDS}, undef, '... NUM_OF_FIELDS should be undef');
|
||||
is($sth->{Statement}, "foo bar", '... Statement is "foo bar"');
|
||||
|
||||
ok(!defined $sth->{NAME}, '... NAME is undefined');
|
||||
ok(!defined $sth->{TYPE}, '... TYPE is undefined');
|
||||
ok(!defined $sth->{SCALE}, '... SCALE is undefined');
|
||||
ok(!defined $sth->{PRECISION}, '... PRECISION is undefined');
|
||||
ok(!defined $sth->{NULLABLE}, '... NULLABLE is undefined');
|
||||
ok(!defined $sth->{RowsInCache}, '... RowsInCache is undefined');
|
||||
ok(!defined $sth->{ParamValues}, '... ParamValues is undefined');
|
||||
# derived NAME attributes
|
||||
ok(!defined $sth->{NAME_uc}, '... NAME_uc is undefined');
|
||||
ok(!defined $sth->{NAME_lc}, '... NAME_lc is undefined');
|
||||
ok(!defined $sth->{NAME_hash}, '... NAME_hash is undefined');
|
||||
ok(!defined $sth->{NAME_uc_hash}, '... NAME_uc_hash is undefined');
|
||||
ok(!defined $sth->{NAME_lc_hash}, '... NAME_lc_hash is undefined');
|
||||
|
||||
my $dbh_ref = ref($dbh);
|
||||
my $sth_ref = ref($sth);
|
||||
|
||||
ok($dbh_ref->can("prepare"), '... $dbh can call "prepare"');
|
||||
ok(!$dbh_ref->can("nonesuch"), '... $dbh cannot call "nonesuch"');
|
||||
ok($sth_ref->can("execute"), '... $sth can call "execute"');
|
||||
|
||||
# what is this test for??
|
||||
|
||||
# I don't know why this warning has the "(perhaps ...)" suffix, it shouldn't:
|
||||
# Can't locate object method "nonesuch" via package "DBI::db" (perhaps you forgot to load "DBI::db"?)
|
||||
eval { ref($dbh)->nonesuch; };
|
||||
|
||||
$dbh->disconnect;
|
||||
};
|
||||
|
||||
SKIP: {
|
||||
skip "Kids attribute not supported under DBI::PurePerl", 1 if $DBI::PurePerl;
|
||||
|
||||
cmp_ok($drh2->{Kids}, '==', 0, '... our Driver (2) has no Kids after this test');
|
||||
}
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
|
||||
1;
|
|
@ -0,0 +1,59 @@
|
|||
#!perl -w
|
||||
$|=1;
|
||||
|
||||
use strict;
|
||||
|
||||
use Test::More tests => 12;
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
## 04mods.t - ...
|
||||
## ----------------------------------------------------------------------------
|
||||
# Note:
|
||||
# the modules tested here are all marked as new and not guaranteed, so if
|
||||
# they change, these tests will fail.
|
||||
## ----------------------------------------------------------------------------
|
||||
|
||||
BEGIN {
|
||||
use_ok( 'DBI' );
|
||||
|
||||
# load these first, since the other two load them
|
||||
# and we want to catch the error first
|
||||
use_ok( 'DBI::Const::GetInfo::ANSI' );
|
||||
use_ok( 'DBI::Const::GetInfo::ODBC' );
|
||||
|
||||
use_ok( 'DBI::Const::GetInfoType', qw(%GetInfoType) );
|
||||
use_ok( 'DBI::Const::GetInfoReturn', qw(%GetInfoReturnTypes %GetInfoReturnValues) );
|
||||
}
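# Typical use of the constants loaded above (guarded so it never runs and
# does not affect the test count; a driver that implements get_info() is
# assumed):
if (0) {
    my $dbh       = DBI->connect('dbi:ExampleP:', '', '');
    my $dbms_name = $dbh->get_info( $GetInfoType{SQL_DBMS_NAME} );
}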
|
||||
|
||||
## test GetInfoType
|
||||
|
||||
cmp_ok(scalar(keys(%GetInfoType)), '>', 1, '... we have at least one key in the GetInfoType hash');
|
||||
|
||||
is_deeply(
|
||||
\%GetInfoType,
|
||||
{ %DBI::Const::GetInfo::ANSI::InfoTypes, %DBI::Const::GetInfo::ODBC::InfoTypes },
|
||||
'... the GetInfoType hash is constructed from the ANSI and ODBC hashes'
|
||||
);
|
||||
|
||||
## test GetInfoReturnTypes
|
||||
|
||||
cmp_ok(scalar(keys(%GetInfoReturnTypes)), '>', 1, '... we have at least one key in the GetInfoReturnType hash');
|
||||
|
||||
is_deeply(
|
||||
\%GetInfoReturnTypes,
|
||||
{ %DBI::Const::GetInfo::ANSI::ReturnTypes, %DBI::Const::GetInfo::ODBC::ReturnTypes },
|
||||
'... the GetInfoReturnType hash is constructed from the ANSI and ODBC hashes'
|
||||
);
|
||||
|
||||
## test GetInfoReturnValues
|
||||
|
||||
cmp_ok(scalar(keys(%GetInfoReturnValues)), '>', 1, '... we have at least one key in the GetInfoReturnValues hash');
|
||||
|
||||
# ... testing GetInfoReturnValues any further would be difficult
|
||||
|
||||
## test the two methods found in DBI::Const::GetInfoReturn
|
||||
|
||||
can_ok('DBI::Const::GetInfoReturn', 'Format');
|
||||
can_ok('DBI::Const::GetInfoReturn', 'Explain');
|
||||
|
||||
1;
|
|
@ -0,0 +1,190 @@
|
|||
# Before `make install' is performed this script should be runnable with
|
||||
# `make test'. After `make install' it should work as `perl CatHash.t'
|
||||
|
||||
#########################
|
||||
|
||||
# change 'tests => 1' to 'tests => last_test_to_print';
|
||||
|
||||
use strict;
|
||||
use Benchmark qw(:all);
|
||||
use Scalar::Util qw(looks_like_number);
|
||||
no warnings 'uninitialized';
|
||||
|
||||
use Test::More tests => 41;
|
||||
|
||||
BEGIN { use_ok('DBI') };
|
||||
|
||||
# null and undef arguments -- make sure these do not segfault
|
||||
is (DBI::_concat_hash_sorted(undef, "=", ":", 0, undef), undef);
|
||||
is (DBI::_concat_hash_sorted({ }, "=", ":", 0, undef), "");
|
||||
eval { DBI::_concat_hash_sorted([], "=", ":", 0, undef) };
|
||||
like ($@ || "", qr/is not a hash reference/);
|
||||
is (DBI::_concat_hash_sorted({ }, undef, ":", 0, undef), "");
|
||||
is (DBI::_concat_hash_sorted({ }, "=", undef, 0, undef), "");
|
||||
is (DBI::_concat_hash_sorted({ }, "=", ":", undef, undef),"");
|
||||
|
||||
# simple cases
|
||||
is (DBI::_concat_hash_sorted({ 1=>"a", 2=>"b" }, "=", ", ", undef, undef), "1='a', 2='b'");
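# Argument order used in all of these calls, matching the pure-Perl
# reference implementation at the bottom of this file:
#   ($hash_ref, $kv_separator, $pair_separator, $use_neat, $num_sort)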
|
||||
# nul byte in key sep and pair sep
|
||||
# (nul byte in hash not supported)
|
||||
is DBI::_concat_hash_sorted({ 1=>"a", 2=>"b" }, "=\000=", ":\000:", undef, undef),
|
||||
"1=\000='a':\000:2=\000='b'", 'should work with nul bytes in kv_sep and pair_sep';
|
||||
is DBI::_concat_hash_sorted({ 1=>"a\000a", 2=>"b" }, "=", ":", 1, undef),
|
||||
"1='a.a':2='b'", 'should work with nul bytes in hash value (neat)';
|
||||
is DBI::_concat_hash_sorted({ 1=>"a\000a", 2=>"b" }, "=", ":", 0, undef),
|
||||
"1='a\000a':2='b'", 'should work with nul bytes in hash value (not neat)';
|
||||
|
||||
# Simple stress tests
|
||||
# limit stress when performing automated testing
|
||||
# eg http://www.nntp.perl.org/group/perl.cpan.testers/2009/06/msg4374116.html
|
||||
my $stress = $ENV{AUTOMATED_TESTING} ? 1_000 : 10_000;
|
||||
ok(DBI::_concat_hash_sorted({bob=>'two', fred=>'one' }, "="x$stress, ":", 1, undef));
|
||||
ok(DBI::_concat_hash_sorted({bob=>'two', fred=>'one' }, "=", ":"x$stress, 1, undef));
|
||||
ok(DBI::_concat_hash_sorted({map {$_=>undef} (1..1000)}, "="x$stress, ":", 1, undef));
|
||||
ok(DBI::_concat_hash_sorted({map {$_=>undef} (1..1000)}, "=", ":"x$stress, 1, undef), 'test');
|
||||
ok(DBI::_concat_hash_sorted({map {$_=>undef} (1..100)}, "="x$stress, ":"x$stress, 1, undef), 'test');
|
||||
|
||||
my $simple_hash = {
|
||||
bob=>"there",
|
||||
jack=>12,
|
||||
fred=>"there",
|
||||
norman=>"there",
|
||||
# sam =>undef
|
||||
};
|
||||
|
||||
my $simple_numeric = {
|
||||
1=>"there",
|
||||
2=>"there",
|
||||
16 => 'yo',
|
||||
07 => "buddy",
|
||||
49 => undef,
|
||||
};
|
||||
|
||||
my $simple_mixed = {
|
||||
bob=>"there",
|
||||
jack=>12,
|
||||
fred=>"there",
|
||||
sam =>undef,
|
||||
1=>"there",
|
||||
32=>"there",
|
||||
16 => 'yo',
|
||||
07 => "buddy",
|
||||
49 => undef,
|
||||
};
|
||||
|
||||
my $simple_float = {
|
||||
1.12 =>"there",
|
||||
3.1415926 =>"there",
|
||||
32=>"there",
|
||||
1.6 => 'yo',
|
||||
0.78 => "buddy",
|
||||
49 => undef,
|
||||
};
|
||||
|
||||
#eval {
|
||||
# DBI::_concat_hash_sorted($simple_hash, "=",,":",1,12);
|
||||
#};
|
||||
ok(1," Unknown sort order");
|
||||
#like ($@, qr/Unknown sort order/, "Unknown sort order");
|
||||
|
||||
|
||||
|
||||
## Loopify and Add Neat
|
||||
|
||||
|
||||
my %neats = (
|
||||
"Neat"=>0,
|
||||
"Not Neat"=> 1
|
||||
);
|
||||
my %sort_types = (
|
||||
guess=>undef,
|
||||
numeric => 1,
|
||||
lexical=> 0
|
||||
);
|
||||
my %hashes = (
|
||||
Numeric=>$simple_numeric,
|
||||
"Simple Hash" => $simple_hash,
|
||||
"Mixed Hash" => $simple_mixed,
|
||||
"Float Hash" => $simple_float
|
||||
);
|
||||
|
||||
for my $sort_type (keys %sort_types){
|
||||
for my $neat (keys %neats) {
|
||||
for my $hash (keys %hashes) {
|
||||
test_concat_hash($hash, $neat, $sort_type);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sub test_concat_hash {
|
||||
my ($hash, $neat, $sort_type) = @_;
|
||||
my @args = ($hashes{$hash}, "=", ":",$neats{$neat}, $sort_types{$sort_type});
|
||||
is (
|
||||
DBI::_concat_hash_sorted(@args),
|
||||
_concat_hash_sorted(@args),
|
||||
"$hash - $neat $sort_type"
|
||||
);
|
||||
}
|
||||
|
||||
if (0) {
|
||||
eval {
|
||||
cmpthese(200_000, {
|
||||
Perl => sub {_concat_hash_sorted($simple_hash, "=", ":",0,undef); },
|
||||
C=> sub {DBI::_concat_hash_sorted($simple_hash, "=", ":",0,1);}
|
||||
});
|
||||
|
||||
print "\n";
|
||||
cmpthese(200_000, {
|
||||
NotNeat => sub {DBI::_concat_hash_sorted(
|
||||
$simple_hash, "=", ":",1,undef);
|
||||
},
|
||||
Neat => sub {DBI::_concat_hash_sorted(
|
||||
$simple_hash, "=", ":",0,undef);
|
||||
}
|
||||
});
|
||||
};
|
||||
}
|
||||
#CatHash::_concat_hash_values({ }, ":-",,"::",1,1);
|
||||
|
||||
|
||||
sub _concat_hash_sorted {
|
||||
my ( $hash_ref, $kv_separator, $pair_separator, $use_neat, $num_sort ) = @_;
|
||||
# $num_sort: 0=lexical, 1=numeric, undef=try to guess
|
||||
|
||||
return undef unless defined $hash_ref;
|
||||
die "hash is not a hash reference" unless ref $hash_ref eq 'HASH';
|
||||
my $keys = _get_sorted_hash_keys($hash_ref, $num_sort);
|
||||
my $string = '';
|
||||
for my $key (@$keys) {
|
||||
$string .= $pair_separator if length $string > 0;
|
||||
my $value = $hash_ref->{$key};
|
||||
if ($use_neat) {
|
||||
$value = DBI::neat($value, 0);
|
||||
}
|
||||
else {
|
||||
$value = (defined $value) ? "'$value'" : 'undef';
|
||||
}
|
||||
$string .= $key . $kv_separator . $value;
|
||||
}
|
||||
return $string;
|
||||
}
|
||||
|
||||
sub _get_sorted_hash_keys {
|
||||
my ($hash_ref, $sort_type) = @_;
|
||||
if (not defined $sort_type) {
|
||||
my $sort_guess = 1;
|
||||
$sort_guess = (not looks_like_number($_)) ? 0 : $sort_guess
|
||||
for keys %$hash_ref;
|
||||
$sort_type = $sort_guess;
|
||||
}
|
||||
|
||||
my @keys = keys %$hash_ref;
|
||||
no warnings 'numeric';
|
||||
my @sorted = ($sort_type)
|
||||
? sort { $a <=> $b or $a cmp $b } @keys
|
||||
: sort @keys;
|
||||
#warn "$sort_type = @sorted\n";
|
||||
return \@sorted;
|
||||
}
|
||||
|
||||
1;
|
|
@ -0,0 +1,334 @@
|
|||
#!perl -w
|
||||
|
||||
use strict;
|
||||
use Storable qw(dclone);
|
||||
|
||||
use Test::More;
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
## 06attrs.t - ...
|
||||
## ----------------------------------------------------------------------------
|
||||
# This test checks the parameters and the values associated with them for
|
||||
# the three different handles (Driver, Database, Statement)
|
||||
## ----------------------------------------------------------------------------
|
||||
|
||||
BEGIN {
|
||||
use_ok( 'DBI' )
|
||||
}
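# Most of the attributes checked below are plain hash lookups on the handle,
# and many are inherited from parent to child when a handle is created.
# An illustrative sketch (guarded so it never runs and the test count is
# unchanged):
if (0) {
    my $dbh   = DBI->connect('dbi:ExampleP:dummy', '', '', { RaiseError => 1 });
    my $sth   = $dbh->prepare('select name from ?');
    my $raise = $sth->{RaiseError};   # 1, copied from $dbh at prepare time
    $dbh->{TraceLevel} = 1;           # writable attributes are assigned the same way
}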
|
||||
|
||||
$|=1;
|
||||
|
||||
my $using_autoproxy = ($ENV{DBI_AUTOPROXY});
|
||||
my $dsn = 'dbi:ExampleP:dummy';
|
||||
|
||||
# Connect to the example driver.
|
||||
my $dbh = DBI->connect($dsn, '', '', {
|
||||
PrintError => 0, RaiseError => 1,
|
||||
});
|
||||
|
||||
isa_ok( $dbh, 'DBI::db' );
|
||||
|
||||
# Clean up when we're done.
|
||||
END { $dbh->disconnect if $dbh };
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
# Check the database handle attributes.
|
||||
|
||||
# bit flag attr
|
||||
ok( $dbh->{Warn}, '... checking Warn attribute for dbh');
|
||||
ok( $dbh->{Active}, '... checking Active attribute for dbh');
|
||||
ok( $dbh->{AutoCommit}, '... checking AutoCommit attribute for dbh');
|
||||
ok(!$dbh->{CompatMode}, '... checking CompatMode attribute for dbh');
|
||||
ok(!$dbh->{InactiveDestroy}, '... checking InactiveDestroy attribute for dbh');
|
||||
ok(!$dbh->{AutoInactiveDestroy}, '... checking AutoInactiveDestroy attribute for dbh');
|
||||
ok(!$dbh->{PrintError}, '... checking PrintError attribute for dbh');
|
||||
ok( $dbh->{PrintWarn}, '... checking PrintWarn attribute for dbh'); # true because of perl -w above
|
||||
ok( $dbh->{RaiseError}, '... checking RaiseError attribute for dbh');
|
||||
ok(!$dbh->{RaiseWarn}, '... checking RaiseWarn attribute for dbh');
|
||||
ok(!$dbh->{ShowErrorStatement}, '... checking ShowErrorStatement attribute for dbh');
|
||||
ok(!$dbh->{ChopBlanks}, '... checking ChopBlanks attribute for dbh');
|
||||
ok(!$dbh->{LongTruncOk}, '... checking LongTruncOk attribute for dbh');
|
||||
ok(!$dbh->{TaintIn}, '... checking TaintIn attribute for dbh');
|
||||
ok(!$dbh->{TaintOut}, '... checking TaintOut attribute for dbh');
|
||||
ok(!$dbh->{Taint}, '... checking Taint attribute for dbh');
|
||||
ok(!$dbh->{Executed}, '... checking Executed attribute for dbh');
|
||||
|
||||
# other attr
|
||||
cmp_ok($dbh->{ErrCount}, '==', 0, '... checking ErrCount attribute for dbh');
|
||||
|
||||
SKIP: {
|
||||
skip "Kids and ActiveKids attribute not supported under DBI::PurePerl", 2 if $DBI::PurePerl;
|
||||
|
||||
cmp_ok($dbh->{Kids}, '==', 0, '... checking Kids attribute for dbh');
|
||||
cmp_ok($dbh->{ActiveKids}, '==', 0, '... checking ActiveKids attribute for dbh');
|
||||
}
|
||||
|
||||
is($dbh->{CachedKids}, undef, '... checking CachedKids attribute for dbh');
|
||||
ok(!defined $dbh->{HandleError}, '... checking HandleError attribute for dbh');
|
||||
ok(!defined $dbh->{Profile}, '... checking Profile attribute for dbh');
|
||||
ok(!defined $dbh->{Statement}, '... checking Statement attribute for dbh');
|
||||
ok(!defined $dbh->{RowCacheSize}, '... checking RowCacheSize attribute for dbh');
|
||||
ok(!defined $dbh->{ReadOnly}, '... checking ReadOnly attribute for dbh');
|
||||
|
||||
is($dbh->{FetchHashKeyName}, 'NAME', '... checking FetchHashKeyName attribute for dbh');
|
||||
is($dbh->{Name}, 'dummy', '... checking Name attribute for dbh') # fails for Multiplex
|
||||
unless $using_autoproxy && ok(1);
|
||||
|
||||
cmp_ok($dbh->{TraceLevel}, '==', $DBI::dbi_debug & 0xF, '... checking TraceLevel attribute for dbh');
|
||||
cmp_ok($dbh->{LongReadLen}, '==', 80, '... checking LongReadLen attribute for dbh');
|
||||
|
||||
is_deeply [ $dbh->FETCH_many(qw(HandleError FetchHashKeyName LongReadLen ErrCount)) ],
|
||||
[ undef, qw(NAME 80 0) ], 'should be able to FETCH_many';
|
||||
|
||||
is $dbh->{examplep_private_dbh_attrib}, 42, 'should see driver-private dbh attribute value';
|
||||
is delete $dbh->{examplep_private_dbh_attrib}, 42, 'delete on non-private attribute acts like fetch';
|
||||
is $dbh->{examplep_private_dbh_attrib}, 42, 'value unchanged after delete';
|
||||
|
||||
$dbh->{private_foo} = 42;
|
||||
is $dbh->{private_foo}, 42, 'should see private_foo dbh attribute value';
|
||||
is delete $dbh->{private_foo}, 42, 'delete should return private_foo dbh attribute value';
|
||||
is $dbh->{private_foo}, undef, 'value of private_foo after delete should be undef';
|
||||
|
||||
# Raise an error.
|
||||
eval {
|
||||
$dbh->do('select foo from foo')
|
||||
};
|
||||
like($@, qr/^DBD::\w+::db do failed: Unknown field names: foo/ , '... catching exception');
|
||||
|
||||
ok(defined $dbh->err, '... $dbh->err is defined after the failed do()');
|
||||
like($dbh->errstr, qr/^Unknown field names: foo\b/, '... checking $dbh->errstr');
|
||||
|
||||
is($dbh->state, 'S1000', '... checking $dbh->state');
|
||||
|
||||
ok($dbh->{Executed}, '... checking Executed attribute for dbh'); # even though it failed
|
||||
$dbh->{Executed} = 0; # reset(able)
|
||||
cmp_ok($dbh->{Executed}, '==', 0, '... checking Executed attribute for dbh (after reset)');
|
||||
|
||||
cmp_ok($dbh->{ErrCount}, '==', 1, '... checking ErrCount attribute for dbh (after error was generated)');
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
# Test the driver handle attributes.
|
||||
|
||||
my $drh = $dbh->{Driver};
|
||||
isa_ok( $drh, 'DBI::dr' );
|
||||
|
||||
ok($dbh->err, '... checking $dbh->err');
|
||||
|
||||
cmp_ok($drh->{ErrCount}, '==', 0, '... checking ErrCount attribute for drh');
|
||||
|
||||
ok( $drh->{Warn}, '... checking Warn attribute for drh');
|
||||
ok( $drh->{Active}, '... checking Active attribute for drh');
|
||||
ok( $drh->{AutoCommit}, '... checking AutoCommit attribute for drh');
|
||||
ok(!$drh->{CompatMode}, '... checking CompatMode attribute for drh');
|
||||
ok(!$drh->{InactiveDestroy}, '... checking InactiveDestroy attribute for drh');
|
||||
ok(!$drh->{AutoInactiveDestroy}, '... checking AutoInactiveDestroy attribute for drh');
|
||||
ok(!$drh->{PrintError}, '... checking PrintError attribute for drh');
|
||||
ok( $drh->{PrintWarn}, '... checking PrintWarn attribute for drh'); # true because of perl -w above
|
||||
ok(!$drh->{RaiseError}, '... checking RaiseError attribute for drh');
|
||||
ok(!$dbh->{RaiseWarn}, '... checking RaiseWarn attribute for dbh');
|
||||
ok(!$drh->{ShowErrorStatement}, '... checking ShowErrorStatement attribute for drh');
|
||||
ok(!$drh->{ChopBlanks}, '... checking ChopBlanks attribute for drh');
|
||||
ok(!$drh->{LongTruncOk}, '... checking LongTruncOk attribute for drh');
|
||||
ok(!$drh->{TaintIn}, '... checking TaintIn attribute for drh');
|
||||
ok(!$drh->{TaintOut}, '... checking TaintOut attribute for drh');
|
||||
ok(!$drh->{Taint}, '... checking Taint attribute for drh');
|
||||
|
||||
SKIP: {
|
||||
skip "Executed attribute not supported under DBI::PurePerl", 1 if $DBI::PurePerl;
|
||||
|
||||
ok($drh->{Executed}, '... checking Executed attribute for drh') # due to the do() above
|
||||
}
|
||||
|
||||
SKIP: {
|
||||
skip "Kids and ActiveKids attribute not supported under DBI::PurePerl", 2 if ($DBI::PurePerl or $dbh->{mx_handle_list});
|
||||
cmp_ok($drh->{Kids}, '==', 1, '... checking Kids attribute for drh');
|
||||
cmp_ok($drh->{ActiveKids}, '==', 1, '... checking ActiveKids attribute for drh');
|
||||
}
|
||||
|
||||
is($drh->{CachedKids}, undef, '... checking CachedKids attribute for drh');
|
||||
ok(!defined $drh->{HandleError}, '... checking HandleError attribute for drh');
|
||||
ok(!defined $drh->{Profile}, '... checking Profile attribute for drh');
|
||||
ok(!defined $drh->{ReadOnly}, '... checking ReadOnly attribute for drh');
|
||||
|
||||
cmp_ok($drh->{TraceLevel}, '==', $DBI::dbi_debug & 0xF, '... checking TraceLevel attribute for drh');
|
||||
cmp_ok($drh->{LongReadLen}, '==', 80, '... checking LongReadLen attribute for drh');
|
||||
|
||||
is($drh->{FetchHashKeyName}, 'NAME', '... checking FetchHashKeyName attribute for drh');
|
||||
is($drh->{Name}, 'ExampleP', '... checking Name attribute for drh')
|
||||
unless $using_autoproxy && ok(1);
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
# Test the statement handle attributes.
|
||||
|
||||
# Create a statement handle.
|
||||
my $sth = $dbh->prepare("select ctime, name from ?");
|
||||
isa_ok($sth, "DBI::st");
|
||||
|
||||
ok(!$sth->{Executed}, '... checking Executed attribute for sth');
|
||||
ok(!$dbh->{Executed}, '... checking Executed attribute for dbh');
|
||||
cmp_ok($sth->{ErrCount}, '==', 0, '... checking ErrCount attribute for sth');
|
||||
|
||||
# Trigger an exception.
|
||||
eval {
|
||||
$sth->execute("foo")
|
||||
};
|
||||
# we don't check actual opendir error msg because of locale differences
|
||||
like($@, qr/^DBD::\w+::st execute failed: .*opendir\(foo\): /msi, '... checking exception');
|
||||
|
||||
# Test all of the statement handle attributes.
|
||||
like($sth->errstr, qr/opendir\(foo\): /, '... checking $sth->errstr');
|
||||
is($sth->state, 'S1000', '... checking $sth->state');
|
||||
ok($sth->{Executed}, '... checking Executed attribute for sth'); # even though it failed
|
||||
ok($dbh->{Executed}, '... checking Executed attribute for dbh'); # due to the failed $sth->execute above
|
||||
|
||||
cmp_ok($sth->{ErrCount}, '==', 1, '... checking ErrCount attribute for sth');
|
||||
|
||||
$sth->{ErrCount} = 0;
|
||||
cmp_ok($sth->{ErrCount}, '==', 0, '... checking ErrCount attribute for sth (after reset)');
|
||||
|
||||
# booleans
|
||||
ok( $sth->{Warn}, '... checking Warn attribute for sth');
|
||||
ok(!$sth->{Active}, '... checking Active attribute for sth');
|
||||
ok(!$sth->{CompatMode}, '... checking CompatMode attribute for sth');
|
||||
ok(!$sth->{InactiveDestroy}, '... checking InactiveDestroy attribute for sth');
|
||||
ok(!$sth->{AutoInactiveDestroy}, '... checking AutoInactiveDestroy attribute for sth');
|
||||
ok(!$sth->{PrintError}, '... checking PrintError attribute for sth');
|
||||
ok( $sth->{PrintWarn}, '... checking PrintWarn attribute for sth');
|
||||
ok( $sth->{RaiseError}, '... checking RaiseError attribute for sth');
|
||||
ok(!$dbh->{RaiseWarn}, '... checking RaiseWarn attribute for dbh');
|
||||
ok(!$sth->{ShowErrorStatement}, '... checking ShowErrorStatement attribute for sth');
|
||||
ok(!$sth->{ChopBlanks}, '... checking ChopBlanks attribute for sth');
|
||||
ok(!$sth->{LongTruncOk}, '... checking LongTruncOk attribute for sth');
|
||||
ok(!$sth->{TaintIn}, '... checking TaintIn attribute for sth');
|
||||
ok(!$sth->{TaintOut}, '... checking TaintOut attribute for sth');
|
||||
ok(!$sth->{Taint}, '... checking Taint attribute for sth');
|
||||
|
||||
# common attr
|
||||
SKIP: {
|
||||
skip "Kids and ActiveKids attribute not supported under DBI::PurePerl", 2 if $DBI::PurePerl;
|
||||
cmp_ok($sth->{Kids}, '==', 0, '... checking Kids attribute for sth');
|
||||
cmp_ok($sth->{ActiveKids}, '==', 0, '... checking ActiveKids attribute for sth');
|
||||
}
|
||||
|
||||
ok(!defined $sth->{CachedKids}, '... checking CachedKids attribute for sth');
|
||||
ok(!defined $sth->{HandleError}, '... checking HandleError attribute for sth');
|
||||
ok(!defined $sth->{Profile}, '... checking Profile attribute for sth');
|
||||
ok(!defined $sth->{ReadOnly}, '... checking ReadOnly attribute for sth');
|
||||
|
||||
cmp_ok($sth->{TraceLevel}, '==', $DBI::dbi_debug & 0xF, '... checking TraceLevel attribute for sth');
|
||||
cmp_ok($sth->{LongReadLen}, '==', 80, '... checking LongReadLen attribute for sth');
|
||||
|
||||
is($sth->{FetchHashKeyName}, 'NAME', '... checking FetchHashKeyName attribute for sth');
|
||||
|
||||
# sth specific attr
|
||||
ok(!defined $sth->{CursorName}, '... checking CursorName attribute for sth');
|
||||
|
||||
cmp_ok($sth->{NUM_OF_FIELDS}, '==', 2, '... checking NUM_OF_FIELDS attribute for sth');
|
||||
cmp_ok($sth->{NUM_OF_PARAMS}, '==', 1, '... checking NUM_OF_PARAMS attribute for sth');
|
||||
|
||||
my $name = $sth->{NAME};
|
||||
is(ref($name), 'ARRAY', '... checking type of NAME attribute for sth');
|
||||
cmp_ok(scalar(@{$name}), '==', 2, '... checking number of elements returned');
|
||||
is_deeply($name, ['ctime', 'name' ], '... checking values returned');
|
||||
|
||||
my $name_lc = $sth->{NAME_lc};
|
||||
is(ref($name_lc), 'ARRAY', '... checking type of NAME_lc attribute for sth');
|
||||
cmp_ok(scalar(@{$name_lc}), '==', 2, '... checking number of elements returned');
|
||||
is_deeply($name_lc, ['ctime', 'name' ], '... checking values returned');
|
||||
|
||||
my $name_uc = $sth->{NAME_uc};
|
||||
is(ref($name_uc), 'ARRAY', '... checking type of NAME_uc attribute for sth');
|
||||
cmp_ok(scalar(@{$name_uc}), '==', 2, '... checking number of elements returned');
|
||||
is_deeply($name_uc, ['CTIME', 'NAME' ], '... checking values returned');
|
||||
|
||||
my $nhash = $sth->{NAME_hash};
|
||||
is(ref($nhash), 'HASH', '... checking type of NAME_hash attribute for sth');
|
||||
cmp_ok(scalar(keys(%{$nhash})), '==', 2, '... checking number of keys returned');
|
||||
cmp_ok($nhash->{ctime}, '==', 0, '... checking values returned');
|
||||
cmp_ok($nhash->{name}, '==', 1, '... checking values returned');
|
||||
|
||||
my $nhash_lc = $sth->{NAME_lc_hash};
|
||||
is(ref($nhash_lc), 'HASH', '... checking type of NAME_lc_hash attribute for sth');
|
||||
cmp_ok(scalar(keys(%{$nhash_lc})), '==', 2, '... checking number of keys returned');
|
||||
cmp_ok($nhash_lc->{ctime}, '==', 0, '... checking values returned');
|
||||
cmp_ok($nhash_lc->{name}, '==', 1, '... checking values returned');
|
||||
|
||||
my $nhash_uc = $sth->{NAME_uc_hash};
|
||||
is(ref($nhash_uc), 'HASH', '... checking type of NAME_uc_hash attribute for sth');
|
||||
cmp_ok(scalar(keys(%{$nhash_uc})), '==', 2, '... checking number of keys returned');
|
||||
cmp_ok($nhash_uc->{CTIME}, '==', 0, '... checking values returned');
|
||||
cmp_ok($nhash_uc->{NAME}, '==', 1, '... checking values returned');
|
||||
|
||||
if (
|
||||
! $using_autoproxy
|
||||
and
|
||||
# Older Storable does not work properly with tied handles
|
||||
# Instead of hard-depending on newer Storable, just skip this
|
||||
# particular test outright
|
||||
eval { Storable->VERSION("2.16") }
|
||||
) {
|
||||
# test the ability to set sth attributes that are usually set internally
|
||||
for $a (qw(NAME NAME_lc NAME_uc NAME_hash NAME_lc_hash NAME_uc_hash)) {
|
||||
my $v = $sth->{$a};
|
||||
ok(eval { $sth->{$a} = dclone($sth->{$a}) }, "Can set sth $a");
|
||||
is_deeply($sth->{$a}, $v, "Can get set sth $a");
|
||||
}
|
||||
}
|
||||
|
||||
my $type = $sth->{TYPE};
|
||||
is(ref($type), 'ARRAY', '... checking type of TYPE attribute for sth');
|
||||
cmp_ok(scalar(@{$type}), '==', 2, '... checking number of elements returned');
|
||||
is_deeply($type, [ 4, 12 ], '... checking values returned');
|
||||
|
||||
my $null = $sth->{NULLABLE};
|
||||
is(ref($null), 'ARRAY', '... checking type of NULLABLE attribute for sth');
|
||||
cmp_ok(scalar(@{$null}), '==', 2, '... checking number of elements returned');
|
||||
is_deeply($null, [ 0, 0 ], '... checking values returned');
|
||||
|
||||
# Should these work? They don't.
|
||||
my $prec = $sth->{PRECISION};
|
||||
is(ref($prec), 'ARRAY', '... checking type of PRECISION attribute for sth');
|
||||
cmp_ok(scalar(@{$prec}), '==', 2, '... checking number of elements returned');
|
||||
is_deeply($prec, [ 10, 1024 ], '... checking values returned');
|
||||
|
||||
my $scale = $sth->{SCALE};
|
||||
is(ref($scale), 'ARRAY', '... checking type of SCALE attribute for sth');
|
||||
cmp_ok(scalar(@{$scale}), '==', 2, '... checking number of elements returned');
|
||||
is_deeply($scale, [ 0, 0 ], '... checking values returned');
|
||||
|
||||
my $params = $sth->{ParamValues};
|
||||
is(ref($params), 'HASH', '... checking type of ParamValues attribute for sth');
|
||||
is($params->{1}, 'foo', '... checking values returned');
|
||||
|
||||
is($sth->{Statement}, "select ctime, name from ?", '... checking Statement attribute for sth');
|
||||
ok(!defined $sth->{RowsInCache}, '... checking type of RowsInCache attribute for sth');
|
||||
|
||||
is $sth->{examplep_private_sth_attrib}, 24, 'should see driver-private sth attribute value';
|
||||
|
||||
# $h->{TraceLevel} tests are in t/09trace.t
|
||||
|
||||
note "Checking inheritance\n";
|
||||
|
||||
SKIP: {
|
||||
skip "drh->dbh->sth inheritance test skipped with DBI_AUTOPROXY", 2 if $ENV{DBI_AUTOPROXY};
|
||||
|
||||
sub check_inherited {
|
||||
my ($drh, $attr, $value, $skip_sth) = @_;
|
||||
local $drh->{$attr} = $value;
|
||||
local $drh->{PrintError} = 1;
|
||||
my $dbh = $drh->connect("dummy");
|
||||
is $dbh->{$attr}, $drh->{$attr}, "dbh $attr value should be inherited from drh";
|
||||
unless ($skip_sth) {
|
||||
my $sth = $dbh->prepare("select name from .");
|
||||
is $sth->{$attr}, $dbh->{$attr}, "sth $attr value should be inherited from dbh";
|
||||
}
|
||||
}
|
||||
|
||||
check_inherited($drh, "ReadOnly", 1, 0);
|
||||
|
||||
}
|
||||
|
||||
done_testing();
|
||||
|
||||
1;
|
||||
# end
|
|
@ -0,0 +1,102 @@
|
|||
#!perl -w
|
||||
$|=1;
|
||||
|
||||
use strict;
|
||||
|
||||
use Test::More;
|
||||
|
||||
use DBI 1.50; # also tests Exporter::require_version
|
||||
|
||||
BEGIN {
|
||||
plan skip_all => '$h->{Kids} attribute not supported for DBI::PurePerl'
|
||||
if $DBI::PurePerl && $DBI::PurePerl; # doubled to avoid typo warning
|
||||
plan tests => 20;
|
||||
}
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
## 07kids.t
|
||||
## ----------------------------------------------------------------------------
|
||||
# This test checks the Kids and the ActiveKids attributes and how they act
|
||||
# in various situations.
|
||||
#
|
||||
# Check the database handle's kids:
|
||||
# - upon creation of handle
|
||||
# - upon creation of statement handle
|
||||
# - after execute of statement handle
|
||||
# - after finish of statement handle
|
||||
# - after destruction of statement handle
|
||||
# Check the driver handle's kids:
|
||||
# - after creation of database handle
|
||||
# - after disconnection of database handle
|
||||
# - after destruction of database handle
|
||||
## ----------------------------------------------------------------------------
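# For reference: a handle's Kids attribute counts its current child handles
# (database handles for a driver handle, statement handles for a database
# handle), and ActiveKids counts the subset of those children whose Active
# attribute is true.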
|
||||
|
||||
|
||||
# Connect to the example driver and create a database handle
|
||||
my $dbh = DBI->connect('dbi:ExampleP:dummy', '', '',
|
||||
{
|
||||
PrintError => 1,
|
||||
RaiseError => 0
|
||||
});
|
||||
|
||||
# check our database handle to make sure it's good
|
||||
isa_ok($dbh, 'DBI::db');
|
||||
|
||||
# check that it has no Kids or ActiveKids yet
|
||||
cmp_ok($dbh->{Kids}, '==', 0, '... database handle has 0 Kid(s) at start');
|
||||
cmp_ok($dbh->{ActiveKids}, '==', 0, '... database handle has 0 ActiveKid(s) at start');
|
||||
|
||||
# create a scope for our $sth to live and die in
|
||||
do {
|
||||
|
||||
# create a statement handle
|
||||
my $sth = $dbh->prepare('select uid from ./');
|
||||
|
||||
# verify that it is a correct statement handle
|
||||
isa_ok($sth, "DBI::st");
|
||||
|
||||
# check our Kids and ActiveKids after prepare
|
||||
cmp_ok($dbh->{Kids}, '==', 1, '... database handle has 1 Kid(s) after $dbh->prepare');
|
||||
cmp_ok($dbh->{ActiveKids}, '==', 0, '... database handle has 0 ActiveKid(s) after $dbh->prepare');
|
||||
|
||||
$sth->execute();
|
||||
|
||||
# check our Kids and ActiveKids after execute
|
||||
cmp_ok($dbh->{Kids}, '==', 1, '... database handle has 1 Kid(s) after $sth->execute');
|
||||
cmp_ok($dbh->{ActiveKids}, '==', 1, '... database handle has 1 ActiveKid(s) after $sth->execute');
|
||||
|
||||
$sth->finish();
|
||||
|
||||
# check our Kids and ActiveKids after finish
|
||||
cmp_ok($dbh->{Kids}, '==', 1, '... database handle has 1 Kid(s) after $sth->finish');
|
||||
cmp_ok($dbh->{ActiveKids}, '==', 0, '... database handle has 0 ActiveKid(s) after $sth->finish');
|
||||
|
||||
};
|
||||
|
||||
# now check it after the statement handle has been destroyed
|
||||
cmp_ok($dbh->{Kids}, '==', 0, '... database handle has 0 Kid(s) after $sth is destroyed');
|
||||
cmp_ok($dbh->{ActiveKids}, '==', 0, '... database handle has 0 ActiveKid(s) after $sth is destroyed');
|
||||
|
||||
# get the database handle's Driver handle
|
||||
my $drh = $dbh->{Driver};
|
||||
|
||||
# check that it is a correct driver handle
|
||||
isa_ok($drh, "DBI::dr");
|
||||
|
||||
# check the driver's Kids and ActiveKids
|
||||
cmp_ok( $drh->{Kids}, '==', 1, '... driver handle has 1 Kid(s)');
|
||||
cmp_ok( $drh->{ActiveKids}, '==', 1, '... driver handle has 1 ActiveKid(s)');
|
||||
|
||||
$dbh->disconnect;
|
||||
|
||||
# check the driver's Kids and ActiveKids after $dbh->disconnect
|
||||
cmp_ok( $drh->{Kids}, '==', 1, '... driver handle has 1 Kid(s) after $dbh->disconnect');
|
||||
cmp_ok( $drh->{ActiveKids}, '==', 0, '... driver handle has 0 ActiveKid(s) after $dbh->disconnect');
|
||||
|
||||
undef $dbh;
|
||||
ok(!defined($dbh), '... make sure that $dbh really is undefined now');
|
||||
|
||||
# check the driver's Kids and ActiveKids after undef $dbh
|
||||
cmp_ok( $drh->{Kids}, '==', 0, '... driver handle has 0 Kid(s) after undef $dbh');
|
||||
cmp_ok( $drh->{ActiveKids}, '==', 0, '... driver handle has 0 ActiveKid(s) after undef $dbh');
|
||||
|
|
@ -0,0 +1,352 @@
|
|||
#!perl -w
|
||||
|
||||
use strict;
|
||||
|
||||
use Test::More;
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
## 08keeperr.t
|
||||
## ----------------------------------------------------------------------------
|
||||
#
|
||||
## ----------------------------------------------------------------------------
|
||||
|
||||
BEGIN {
|
||||
use_ok('DBI');
|
||||
}
|
||||
|
||||
$|=1;
|
||||
$^W=1;
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
# subclass DBI
|
||||
|
||||
# DBI subclass
|
||||
package My::DBI;
|
||||
use base 'DBI';
|
||||
|
||||
# Database handle subclass
|
||||
package My::DBI::db;
|
||||
use base 'DBI::db';
|
||||
|
||||
# Statement handle subclass
|
||||
package My::DBI::st;
|
||||
use base 'DBI::st';
|
||||
|
||||
sub execute {
|
||||
my $sth = shift;
|
||||
# we localize an attribute here to check that the corresponding STORE
|
||||
# at scope exit doesn't clear any recorded error
|
||||
local $sth->{Warn} = 0;
|
||||
my $rv = $sth->SUPER::execute(@_);
|
||||
return $rv;
|
||||
}
|
||||
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
# subclass the subclass of DBI
|
||||
|
||||
package Test;
|
||||
|
||||
use strict;
|
||||
use base 'My::DBI';
|
||||
|
||||
use DBI;
|
||||
|
||||
my @con_info = ('dbi:ExampleP:.', undef, undef, { PrintError => 0, RaiseError => 1 });
|
||||
|
||||
sub test_select {
|
||||
my $dbh = shift;
|
||||
eval { $dbh->selectrow_arrayref('select * from foo') };
|
||||
$dbh->disconnect;
|
||||
return $@;
|
||||
}
|
||||
|
||||
my $err1 = test_select( My::DBI->connect(@con_info) );
|
||||
Test::More::like($err1, qr/^DBD::(ExampleP|Multiplex|Gofer)::db selectrow_arrayref failed: opendir/, '... checking error');
|
||||
|
||||
my $err2 = test_select( DBI->connect(@con_info) );
|
||||
Test::More::like($err2, qr/^DBD::(ExampleP|Multiplex|Gofer)::db selectrow_arrayref failed: opendir/, '... checking error');
|
||||
|
||||
package main;
|
||||
|
||||
my $using_dbd_gofer = ( $ENV{DBI_AUTOPROXY} || '' ) =~ /^dbi:Gofer.*transport=/i;
|
||||
|
||||
|
||||
# test ping does not destroy the errstr
|
||||
sub ping_keeps_err {
|
||||
my $dbh = DBI->connect('DBI:ExampleP:', undef, undef, { PrintError => 0 });
|
||||
|
||||
$dbh->set_err(42, "ERROR 42");
|
||||
is $dbh->err, 42;
|
||||
is $dbh->errstr, "ERROR 42";
|
||||
ok $dbh->ping, "ping returns true";
|
||||
is $dbh->err, 42, "err unchanged after ping";
|
||||
is $dbh->errstr, "ERROR 42", "errstr unchanged after ping";
|
||||
|
||||
$dbh->disconnect;
|
||||
|
||||
$dbh->set_err(42, "ERROR 42");
|
||||
is $dbh->err, 42, "err unchanged after ping";
|
||||
is $dbh->errstr, "ERROR 42", "errstr unchanged after ping";
|
||||
ok !$dbh->ping, "ping returns false";
|
||||
# it's reasonable for ping() to set err/errstr if it fails
|
||||
# so here we just test that there is an error
|
||||
ok $dbh->err, "err true after failed ping";
|
||||
ok $dbh->errstr, "errstr true after failed ping";
|
||||
|
||||
|
||||
# for a driver which doesn't have its own ping
|
||||
$dbh = DBI->connect('DBI:Sponge:', undef, undef, { PrintError => 0 });
|
||||
$dbh->STORE(Active => 1);
|
||||
|
||||
$dbh->set_err(42, "ERROR 42");
|
||||
is $dbh->err, 42;
|
||||
is $dbh->errstr, "ERROR 42";
|
||||
ok $dbh->ping, "ping returns true: ".$dbh->ping;
|
||||
is $dbh->err, 42, "err unchanged after ping";
|
||||
is $dbh->errstr, "ERROR 42", "errstr unchanged after ping";
|
||||
|
||||
$dbh->disconnect;
|
||||
$dbh->STORE(Active => 0);
|
||||
|
||||
$dbh->set_err(42, "ERROR 42");
|
||||
is $dbh->err, 42, "err unchanged after ping";
|
||||
is $dbh->errstr, "ERROR 42", "errstr unchanged after ping";
|
||||
ok !$dbh->ping, "ping returns false";
|
||||
# it's reasonable for ping() to set err/errstr if it fails
|
||||
# so here we just test that there is an error
|
||||
ok $dbh->err, "err true after failed ping";
|
||||
ok $dbh->errstr, "errstr true after failed ping";
|
||||
|
||||
}
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
print "Test HandleSetErr\n";
|
||||
|
||||
my $dbh = DBI->connect(@con_info);
|
||||
isa_ok($dbh, "DBI::db");
|
||||
|
||||
$dbh->{RaiseError} = 1;
|
||||
$dbh->{PrintError} = 1;
|
||||
$dbh->{RaiseWarn} = 0;
|
||||
$dbh->{PrintWarn} = 1;
|
||||
|
||||
# warning handler
|
||||
my %warn;
|
||||
my @handlewarn;
|
||||
|
||||
sub reset_warn_counts {
|
||||
%warn = ( failed => 0, warning => 0 );
|
||||
@handlewarn = (0,0,0);
|
||||
}
|
||||
reset_warn_counts();
|
||||
|
||||
$SIG{__WARN__} = sub {
|
||||
my $msg = shift;
|
||||
if ($msg =~ /^DBD::\w+::\S+\s+(\S+)\s+(\w+)/) {
|
||||
++$warn{$2};
|
||||
$msg =~ s/\n/\\n/g;
|
||||
print "warn: '$msg'\n";
|
||||
return;
|
||||
}
|
||||
warn $msg;
|
||||
};
|
||||
|
||||
# HandleSetErr handler
|
||||
$dbh->{HandleSetErr} = sub {
|
||||
my ($h, $err, $errstr, $state) = @_;
|
||||
return 0
|
||||
unless defined $err;
|
||||
++$handlewarn[ $err ? 2 : length($err) ]; # count [info, warn, err] calls
|
||||
return 1
|
||||
if $state && $state eq "return"; # for tests
|
||||
($_[1], $_[2], $_[3]) = (99, "errstr99", "OV123")
|
||||
if $state && $state eq "override"; # for tests
|
||||
return 0
|
||||
if $err; # be transparent for errors
|
||||
local $^W;
|
||||
print "HandleSetErr called: h=$h, err=$err, errstr=$errstr, state=$state\n";
|
||||
return 0;
|
||||
};
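# The handler above follows the documented HandleSetErr protocol: it is
# called from within set_err() with the handle and the err, errstr and state
# values about to be recorded, it may rewrite those values by assigning to
# @_ (the "override" branch), and returning a true value makes set_err()
# skip recording them and return an empty list (the "return" branch,
# checked further below).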
|
||||
|
||||
# start our tests
|
||||
|
||||
ok(!defined $DBI::err, '... $DBI::err is not defined');
|
||||
|
||||
# ----
|
||||
|
||||
$dbh->set_err("", "(got info)");
|
||||
|
||||
ok(defined $DBI::err, '... $DBI::err is defined'); # true
|
||||
is($DBI::err, "", '... $DBI::err is an empty string');
|
||||
is($DBI::errstr, "(got info)", '... $DBI::errstr is as we expected');
|
||||
is($dbh->errstr, "(got info)", '... $dbh->errstr matches $DBI::errstr');
|
||||
cmp_ok($warn{failed}, '==', 0, '... $warn{failed} is 0');
|
||||
cmp_ok($warn{warning}, '==', 0, '... $warn{warning} is 0');
|
||||
is_deeply(\@handlewarn, [ 1, 0, 0 ], '... the @handlewarn array is (1, 0, 0)');
|
||||
|
||||
# ----
|
||||
|
||||
$dbh->set_err(0, "(got warn)", "AA001"); # triggers PrintWarn
|
||||
|
||||
ok(defined $DBI::err, '... $DBI::err is defined');
|
||||
is($DBI::err, "0", '... $DBI::err is "0"');
|
||||
is($DBI::errstr, "(got info)\n(got warn)",
|
||||
'... $DBI::errstr is as we expected');
|
||||
is($dbh->errstr, "(got info)\n(got warn)",
|
||||
'... $dbh->errstr matches $DBI::errstr');
|
||||
is($DBI::state, "AA001", '... $DBI::state is AA001');
|
||||
cmp_ok($warn{warning}, '==', 1, '... $warn{warning} is 1');
|
||||
is_deeply(\@handlewarn, [ 1, 1, 0 ], '... the @handlewarn array is (1, 1, 0)');
|
||||
|
||||
|
||||
# ----
|
||||
|
||||
$dbh->set_err("", "(got more info)"); # triggers PrintWarn
|
||||
|
||||
ok(defined $DBI::err, '... $DBI::err is defined');
|
||||
is($DBI::err, "0", '... $DBI::err is "0"'); # not "", ie it's still a warn
|
||||
is($dbh->err, "0", '... $dbh->err is "0"');
|
||||
is($DBI::state, "AA001", '... $DBI::state is AA001');
|
||||
is($DBI::errstr, "(got info)\n(got warn)\n(got more info)",
|
||||
'... $DBI::errstr is as we expected');
|
||||
is($dbh->errstr, "(got info)\n(got warn)\n(got more info)",
|
||||
'... $dbh->errstr matches $DBI::errstr');
|
||||
cmp_ok($warn{warning}, '==', 2, '... $warn{warning} is 2');
|
||||
is_deeply(\@handlewarn, [ 2, 1, 0 ], '... the @handlewarn array is (2, 1, 0)');
|
||||
|
||||
# ----
|
||||
|
||||
$dbh->{RaiseError} = 0;
|
||||
$dbh->{PrintError} = 1;
|
||||
$dbh->{RaiseWarn} = 1;
|
||||
|
||||
# ----
|
||||
|
||||
$dbh->set_err("42", "(got error)", "AA002");
|
||||
|
||||
ok(defined $DBI::err, '... $DBI::err is defined');
|
||||
cmp_ok($DBI::err, '==', 42, '... $DBI::err is 42');
|
||||
cmp_ok($warn{warning}, '==', 2, '... $warn{warning} is 2');
|
||||
is($dbh->errstr, "(got info)\n(got warn)\n(got more info) [state was AA001 now AA002]\n(got error)",
|
||||
'... $dbh->errstr is as we expected');
|
||||
is($DBI::state, "AA002", '... $DBI::state is AA002');
|
||||
is_deeply(\@handlewarn, [ 2, 1, 1 ], '... the @handlewarn array is (2, 1, 1)');
|
||||
|
||||
# ----
|
||||
|
||||
$dbh->set_err("", "(got info)");
|
||||
|
||||
ok(defined $DBI::err, '... $DBI::err is defined');
|
||||
cmp_ok($DBI::err, '==', 42, '... $DBI::err is 42');
|
||||
cmp_ok($warn{warning}, '==', 2, '... $warn{warning} is 2');
|
||||
is($dbh->errstr, "(got info)\n(got warn)\n(got more info) [state was AA001 now AA002]\n(got error)\n(got info)",
|
||||
'... $dbh->errstr is as we expected');
|
||||
is_deeply(\@handlewarn, [ 3, 1, 1 ], '... the @handlewarn array is (3, 1, 1)');
|
||||
|
||||
# ----
|
||||
|
||||
$dbh->set_err("0", "(got warn)"); # no PrintWarn because it's already an err
|
||||
|
||||
ok(defined $DBI::err, '... $DBI::err is defined');
|
||||
cmp_ok($DBI::err, '==', 42, '... $DBI::err is 42');
|
||||
cmp_ok($warn{warning}, '==', 2, '... $warn{warning} is 2');
|
||||
is($dbh->errstr, "(got info)\n(got warn)\n(got more info) [state was AA001 now AA002]\n(got error)\n(got info)\n(got warn)",
|
||||
'... $dbh->errstr is as we expected');
|
||||
is_deeply(\@handlewarn, [ 3, 2, 1 ], '... the @handlewarn array is (3, 2, 1)');
|
||||
|
||||
# ----
|
||||
|
||||
$dbh->set_err("4200", "(got new error)", "AA003");
|
||||
|
||||
ok(defined $DBI::err, '... $DBI::err is defined');
|
||||
cmp_ok($DBI::err, '==', 4200, '... $DBI::err is 4200');
|
||||
cmp_ok($warn{warning}, '==', 2, '... $warn{warning} is 2');
|
||||
is($dbh->errstr, "(got info)\n(got warn)\n(got more info) [state was AA001 now AA002]\n(got error)\n(got info)\n(got warn) [err was 42 now 4200] [state was AA002 now AA003]\n(got new error)",
|
||||
'... $dbh->errstr is as we expected');
|
||||
is_deeply(\@handlewarn, [ 3, 2, 2 ], '... the @handlewarn array is (3, 2, 2)');
|
||||
|
||||
# ----
|
||||
|
||||
$dbh->set_err(undef, "foo", "bar"); # clear error
|
||||
|
||||
ok(!defined $dbh->errstr, '... $dbh->errstr is not defined');
|
||||
ok(!defined $dbh->err, '... $dbh->err is not defined');
|
||||
is($dbh->state, "", '... $dbh->state is an empty string');
|
||||
|
||||
# ----
|
||||
|
||||
reset_warn_counts();
|
||||
|
||||
# ----
|
||||
|
||||
my @ret;
|
||||
@ret = $dbh->set_err(1, "foo"); # PrintError
|
||||
|
||||
cmp_ok(scalar(@ret), '==', 1, '... only returned one value');
|
||||
ok(!defined $ret[0], '... the first value is undefined');
|
||||
ok(!defined $dbh->set_err(2, "bar"), '... $dbh->set_err returned undefined'); # PrintError
|
||||
ok(!defined $dbh->set_err(3, "baz"), '... $dbh->set_err returned undefined'); # PrintError
|
||||
ok(!defined $dbh->set_err(0, "warn"), '... $dbh->set_err returned undefined'); # PrintError
|
||||
is($dbh->errstr, "foo [err was 1 now 2]\nbar [err was 2 now 3]\nbaz\nwarn",
|
||||
'... $dbh->errstr is as we expected');
|
||||
is($warn{failed}, 4, '... $warn{failed} is 4');
|
||||
is_deeply(\@handlewarn, [ 0, 1, 3 ], '... the @handlewarn array is (0, 1, 3)');
|
||||
|
||||
# ----
|
||||
|
||||
$dbh->set_err(undef, undef, undef); # clear error
|
||||
|
||||
@ret = $dbh->set_err(1, "foo", "AA123", "method");
|
||||
cmp_ok(scalar @ret, '==', 1, '... only returned one value');
|
||||
ok(!defined $ret[0], '... the first value is undefined');
|
||||
|
||||
@ret = $dbh->set_err(1, "foo", "AA123", "method", "42");
|
||||
cmp_ok(scalar @ret, '==', 1, '... only returned one value');
|
||||
is($ret[0], "42", '... the first value is "42"');
|
||||
|
||||
@ret = $dbh->set_err(1, "foo", "return");
|
||||
cmp_ok(scalar @ret, '==', 0, '... returned no values');
|
||||
|
||||
# ----
|
||||
|
||||
$dbh->set_err(undef, undef, undef); # clear error
|
||||
|
||||
@ret = $dbh->set_err("", "info", "override");
|
||||
cmp_ok(scalar @ret, '==', 1, '... only returned one value');
|
||||
ok(!defined $ret[0], '... the first value is undefined');
|
||||
cmp_ok($dbh->err, '==', 99, '... $dbh->err is 99');
|
||||
is($dbh->errstr, "errstr99", '... $dbh->errstr is as we expected');
|
||||
is($dbh->state, "OV123", '... $dbh->state is as we expected');
|
||||
$dbh->disconnect;
|
||||
|
||||
# ---
|
||||
|
||||
ping_keeps_err();
|
||||
|
||||
# ---
|
||||
|
||||
reset_warn_counts();
|
||||
|
||||
SKIP: {
|
||||
# we could test this with gofer if we used a keep_err method other than STORE
|
||||
# to trigger the set_err calls
|
||||
skip 'set_err keep_error skipped for Gofer', 2
|
||||
if $using_dbd_gofer;
|
||||
|
||||
$dbh->{examplep_set_err} = ""; # set information state
|
||||
cmp_ok($warn{warning}, '==', 0, 'no extra warning generated for set_err("") in STORE');
|
||||
|
||||
$dbh->{RaiseWarn} = 0;
|
||||
|
||||
$dbh->{examplep_set_err} = "0"; # set warning state
|
||||
cmp_ok($warn{warning}, '==', 1, 'warning generated for set_err("0") in STORE');
|
||||
}
|
||||
|
||||
# ---
|
||||
|
||||
# ----
|
||||
done_testing();
|
||||
|
||||
1;
|
||||
# end
|
|
@ -0,0 +1,137 @@
|
|||
#!perl -w
|
||||
# vim:sw=4:ts=8
|
||||
|
||||
use strict;
|
||||
|
||||
use Test::More tests => 99;
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
## 09trace.t
|
||||
## ----------------------------------------------------------------------------
|
||||
#
|
||||
## ----------------------------------------------------------------------------
|
||||
|
||||
BEGIN {
|
||||
$ENV{DBI_TRACE} = 0; # for PurePerl - ensure DBI_TRACE is in the env
|
||||
use_ok( 'DBI' );
|
||||
}
|
||||
|
||||
$|=1;
|
||||
|
||||
|
||||
my $trace_file = "dbitrace$$.log";
|
||||
|
||||
1 while unlink $trace_file;
|
||||
warn "Can't unlink existing $trace_file: $!" if -e $trace_file;
|
||||
|
||||
my $orig_trace_level = DBI->trace;
|
||||
DBI->trace(3, $trace_file); # enable trace before first driver load
|
||||
|
||||
my $dbh = DBI->connect('dbi:ExampleP(AutoCommit=>1):', undef, undef);
|
||||
die "Unable to connect to ExampleP driver: $DBI::errstr" unless $dbh;
|
||||
|
||||
isa_ok($dbh, 'DBI::db');
|
||||
|
||||
$dbh->dump_handle("dump_handle test, write to log file", 2);
|
||||
|
||||
DBI->trace(0, undef); # turn off and restore to STDERR
|
||||
|
||||
SKIP: {
|
||||
skip "cygwin has buffer flushing bug", 1 if ($^O =~ /cygwin/i);
|
||||
ok( -s $trace_file, "trace file size = " . -s $trace_file);
|
||||
}
|
||||
|
||||
DBI->trace($orig_trace_level); # no way to restore previous outfile XXX
|
||||
|
||||
|
||||
# Clean up when we're done.
|
||||
END { $dbh->disconnect if $dbh;
|
||||
1 while unlink $trace_file; };
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
# Check the database handle attributes.
|
||||
|
||||
cmp_ok($dbh->{TraceLevel}, '==', $DBI::dbi_debug & 0xF, '... checking TraceLevel attribute');
|
||||
|
||||
1 while unlink $trace_file;
|
||||
|
||||
$dbh->trace(0, $trace_file);
|
||||
ok( -f $trace_file, '... trace file successfully created');
|
||||
|
||||
my @names = qw(
|
||||
SQL
|
||||
CON
|
||||
ENC
|
||||
DBD
|
||||
TXN
|
||||
foo bar baz boo bop
|
||||
);
|
||||
my %flag;
|
||||
my $all_flags = 0;
|
||||
|
||||
foreach my $name (@names) {
|
||||
print "parse_trace_flag $name\n";
|
||||
ok( my $flag1 = $dbh->parse_trace_flag($name) );
|
||||
ok( my $flag2 = $dbh->parse_trace_flags($name) );
|
||||
is( $flag1, $flag2 );
|
||||
|
||||
$dbh->{TraceLevel} = $flag1;
|
||||
is( $dbh->{TraceLevel}, $flag1 );
|
||||
|
||||
$dbh->{TraceLevel} = 0;
|
||||
is( $dbh->{TraceLevel}, 0 );
|
||||
|
||||
$dbh->trace($flag1);
|
||||
is $dbh->trace, $flag1;
|
||||
is $dbh->{TraceLevel}, $flag1;
|
||||
|
||||
$dbh->{TraceLevel} = $name; # set by name
|
||||
$dbh->{TraceLevel} = undef; # check no change on undef
|
||||
is( $dbh->{TraceLevel}, $flag1 );
|
||||
|
||||
$flag{$name} = $flag1;
|
||||
$all_flags |= $flag1
|
||||
if defined $flag1; # reduce noise if there's a bug
|
||||
}
|
||||
|
||||
print "parse_trace_flag @names\n";
|
||||
ok(eq_set([ keys %flag ], [ @names ]), '... a flag value was recorded for every name');
|
||||
$dbh->{TraceLevel} = 0;
|
||||
$dbh->{TraceLevel} = join "|", @names;
|
||||
is($dbh->{TraceLevel}, $all_flags, '... TraceLevel set by joined names equals the ORed flag values');
|
||||
|
||||
{
|
||||
print "inherit\n";
|
||||
my $sth = $dbh->prepare("select ctime, name from foo");
|
||||
isa_ok( $sth, 'DBI::st' );
|
||||
is( $sth->{TraceLevel}, $all_flags );
|
||||
}
|
||||
|
||||
$dbh->{TraceLevel} = 0;
|
||||
ok !$dbh->{TraceLevel};
|
||||
$dbh->{TraceLevel} = 'ALL';
|
||||
ok $dbh->{TraceLevel};
|
||||
|
||||
{
|
||||
print "test unknown parse_trace_flag\n";
|
||||
my $warn = 0;
|
||||
local $SIG{__WARN__} = sub {
|
||||
if ($_[0] =~ /unknown/i) { ++$warn; print "caught warn: ",@_ }else{ warn @_ }
|
||||
};
|
||||
is $dbh->parse_trace_flag("nonesuch"), undef;
|
||||
is $warn, 0;
|
||||
is $dbh->parse_trace_flags("nonesuch"), 0;
|
||||
is $warn, 1;
|
||||
is $dbh->parse_trace_flags("nonesuch|SQL|nonesuch2"), $dbh->parse_trace_flag("SQL");
|
||||
is $warn, 2;
|
||||
}
|
||||
|
||||
$dbh->dump_handle("dump_handle test, write to log file", 2);
|
||||
|
||||
$dbh->trace(0);
|
||||
ok !$dbh->{TraceLevel};
|
||||
$dbh->trace(undef, "STDERR"); # close $trace_file
|
||||
ok( -s $trace_file );
|
||||
|
||||
1;
|
||||
# end
|
|
@ -0,0 +1,640 @@
|
|||
#!perl -w
|
||||
|
||||
use lib qw(blib/arch blib/lib); # needed since -T ignores PERL5LIB
|
||||
use DBI qw(:sql_types);
|
||||
use Config;
|
||||
use Cwd;
|
||||
use strict;
|
||||
use Data::Dumper;
|
||||
|
||||
$^W = 1;
|
||||
$| = 1;
|
||||
|
||||
require File::Basename;
|
||||
require File::Spec;
|
||||
require VMS::Filespec if $^O eq 'VMS';
|
||||
|
||||
use Test::More tests => 242;
|
||||
|
||||
do {
|
||||
# provide some protection against growth in size of '.' during the test
|
||||
# which was the probable cause of this failure
|
||||
# http://www.nntp.perl.org/group/perl.cpan.testers/2009/09/msg5297317.html
|
||||
my $tmpfile = "deleteme_$$";
|
||||
open my $fh, ">$tmpfile";
|
||||
close $fh;
|
||||
unlink $tmpfile;
|
||||
};
|
||||
|
||||
# "globals"
|
||||
my ($r, $dbh);
|
||||
|
||||
ok !eval {
|
||||
$dbh = DBI->connect("dbi:NoneSuch:foobar", 1, 1, { RaiseError => 1, AutoCommit => 1 });
|
||||
}, 'connect should fail';
|
||||
like($@, qr/install_driver\(NoneSuch\) failed/, '... we should have an exception here');
|
||||
ok(!$dbh, '... $dbh should not be defined');
|
||||
|
||||
{
|
||||
my ($error, $tdbh);
|
||||
eval {
|
||||
$tdbh = DBI->connect('dbi:ExampleP:', '', []);
|
||||
} or do {
|
||||
$error= $@ || "Zombie Error";
|
||||
};
|
||||
like($error,qr/Usage:/,"connect with unblessed ref password should fail");
|
||||
ok(!defined($tdbh), '... $dbh should not be defined');
|
||||
}
|
||||
{
|
||||
package Test::Secret;
|
||||
use overload '""' => sub { return "" };
|
||||
}
|
||||
{
|
||||
my ($error,$tdbh);
|
||||
eval {
|
||||
$tdbh = DBI->connect('dbi:ExampleP:', '', bless [], "Test::Secret");
|
||||
} or do {
|
||||
$error= $@ || "Zombie Error";
|
||||
};
|
||||
ok(!$error,"connect with blessed ref password should not fail");
|
||||
ok(defined($tdbh), '... $dbh should be defined');
|
||||
}
|
||||
|
||||
$dbh = DBI->connect('dbi:ExampleP:', '', '');
|
||||
|
||||
sub check_connect_cached {
|
||||
# connect_cached
|
||||
# ------------------------------------------
|
||||
# This test checks that connect_cached works
|
||||
# and how it then relates to the CachedKids
|
||||
# attribute for the driver.
|
||||
|
||||
ok my $dbh_cached_1 = DBI->connect_cached('dbi:ExampleP:', '', '', { TraceLevel=>0, Executed => 0 });
|
||||
|
||||
ok my $dbh_cached_2 = DBI->connect_cached('dbi:ExampleP:', '', '', { TraceLevel=>0, Executed => 0 });
|
||||
|
||||
is($dbh_cached_1, $dbh_cached_2, '... these 2 handles are cached, so they are the same');
|
||||
|
||||
ok my $dbh_cached_3 = DBI->connect_cached('dbi:ExampleP:', '', '', { examplep_foo => 1 });
|
||||
|
||||
isnt($dbh_cached_3, $dbh_cached_2, '... this handle was created with different parameters, so it is not the same');
|
||||
|
||||
# check that connect_cached applies attributes to handles returned from the cache
|
||||
# (The specific case of Executed is relevant to DBD::Gofer retry-on-error logic)
|
||||
ok $dbh_cached_1->do("select * from ."); # set Executed flag
|
||||
ok $dbh_cached_1->{Executed}, 'Executed should be true';
|
||||
ok my $dbh_cached_4 = DBI->connect_cached('dbi:ExampleP:', '', '', { TraceLevel=>0, Executed => 0 });
|
||||
is $dbh_cached_4, $dbh_cached_1, 'should return same handle';
|
||||
ok !$dbh_cached_4->{Executed}, 'Executed should be false because reset by connect attributes';
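# (connect_cached re-applies the attributes passed to it each time a handle
# is returned from the cache, which is why Executed is reset to 0 above)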
|
||||
|
||||
my $drh = $dbh->{Driver};
|
||||
isa_ok($drh, "DBI::dr");
|
||||
|
||||
my @cached_kids = values %{$drh->{CachedKids}};
|
||||
ok(eq_set(\@cached_kids, [ $dbh_cached_1, $dbh_cached_3 ]), '... these are our cached kids');
|
||||
|
||||
$drh->{CachedKids} = {};
|
||||
cmp_ok(scalar(keys %{$drh->{CachedKids}}), '==', 0, '... we have emptied out cache');
|
||||
}
|
||||
|
||||
check_connect_cached();
|
||||
|
||||
$dbh->{AutoCommit} = 1;
|
||||
$dbh->{PrintError} = 0;
|
||||
|
||||
ok($dbh->{AutoCommit} == 1);
|
||||
cmp_ok($dbh->{PrintError}, '==', 0, '... PrintError should be 0');
|
||||
|
||||
is($dbh->{FetchHashKeyName}, 'NAME', '... FetchHashKey is NAME');
|
||||
|
||||
# test access to driver-private attributes
|
||||
like($dbh->{example_driver_path}, qr/DBD\/ExampleP\.pm$/, '... checking the example driver_path');
|
||||
|
||||
print "others\n";
|
||||
eval { $dbh->commit('dummy') };
|
||||
ok($@ =~ m/DBI commit: invalid number of arguments:/, $@)
|
||||
unless $DBI::PurePerl && ok(1);
|
||||
|
||||
ok($dbh->ping, "ping should return true");
|
||||
|
||||
# --- errors
|
||||
my $cursor_e = $dbh->prepare("select unknown_field_name from ?");
|
||||
is($cursor_e, undef, "prepare should fail");
|
||||
ok($dbh->err, "sth->err should be true");
|
||||
ok($DBI::err, "DBI::err should be true");
|
||||
cmp_ok($DBI::err, 'eq', $dbh->err , "\$DBI::err should match \$dbh->err");
|
||||
like($DBI::errstr, qr/Unknown field names: unknown_field_name/, "\$DBI::errstr should contain error string");
|
||||
cmp_ok($DBI::errstr, 'eq', $dbh->errstr, "\$DBI::errstr should match \$dbh->errstr");
|
||||
|
||||
# --- func
|
||||
ok($dbh->errstr eq $dbh->func('errstr'));
|
||||
|
||||
my $std_sql = "select mode,size,name from ?";
|
||||
my $csr_a = $dbh->prepare($std_sql);
|
||||
ok(ref $csr_a);
|
||||
ok($csr_a->{NUM_OF_FIELDS} == 3);
|
||||
|
||||
SKIP: {
|
||||
skip "inner/outer handles not fully supported for DBI::PurePerl", 3 if $DBI::PurePerl;
|
||||
ok(tied %{ $csr_a->{Database} }); # ie is 'outer' handle
|
||||
ok($csr_a->{Database} eq $dbh, "$csr_a->{Database} ne $dbh")
|
||||
unless $dbh->{mx_handle_list} && ok(1); # skip for Multiplex tests
|
||||
ok(tied %{ $csr_a->{Database}->{Driver} }); # ie is 'outer' handle
|
||||
}
|
||||
|
||||
my $driver_name = $csr_a->{Database}->{Driver}->{Name};
|
||||
ok($driver_name eq 'ExampleP')
|
||||
unless $ENV{DBI_AUTOPROXY} && ok(1);
|
||||
|
||||
# --- FetchHashKeyName
|
||||
$dbh->{FetchHashKeyName} = 'NAME_uc';
|
||||
my $csr_b = $dbh->prepare($std_sql);
|
||||
$csr_b->execute('.');
|
||||
ok(ref $csr_b);
|
||||
|
||||
ok($csr_a != $csr_b);
|
||||
|
||||
ok("@{$csr_b->{NAME_lc}}" eq "mode size name"); # before NAME
|
||||
ok("@{$csr_b->{NAME_uc}}" eq "MODE SIZE NAME");
|
||||
ok("@{$csr_b->{NAME}}" eq "mode size name");
|
||||
ok("@{$csr_b->{ $csr_b->{FetchHashKeyName} }}" eq "MODE SIZE NAME");
|
||||
|
||||
ok("@{[sort keys %{$csr_b->{NAME_lc_hash}}]}" eq "mode name size");
|
||||
ok("@{[sort values %{$csr_b->{NAME_lc_hash}}]}" eq "0 1 2");
|
||||
ok("@{[sort keys %{$csr_b->{NAME_uc_hash}}]}" eq "MODE NAME SIZE");
|
||||
ok("@{[sort values %{$csr_b->{NAME_uc_hash}}]}" eq "0 1 2");
|
||||
|
||||
do "./t/lib.pl";
|
||||
|
||||
# get a dir always readable on all platforms
|
||||
#my $dir = getcwd() || cwd();
|
||||
#$dir = VMS::Filespec::unixify($dir) if $^O eq 'VMS';
|
||||
# untaint $dir
|
||||
#$dir =~ m/(.*)/; $dir = $1 || die;
|
||||
my $dir = test_dir ();
|
||||
|
||||
# ---
|
||||
|
||||
my($col0, $col1, $col2, $col3, $rows);
|
||||
my(@row_a, @row_b);
|
||||
|
||||
ok($csr_a->bind_columns(undef, \($col0, $col1, $col2)) );
|
||||
ok($csr_a->execute( $dir ), $DBI::errstr);
|
||||
|
||||
@row_a = $csr_a->fetchrow_array;
|
||||
ok(@row_a);
|
||||
|
||||
# check bind_columns
|
||||
is($row_a[0], $col0);
|
||||
is($row_a[1], $col1);
|
||||
is($row_a[2], $col2);
|
||||
|
||||
ok( ! $csr_a->bind_columns(undef, \($col0, $col1)) );
|
||||
like $csr_a->errstr, '/bind_columns called with 2 values but 3 are needed/', 'errstr should contain error message';
|
||||
ok( ! $csr_a->bind_columns(undef, \($col0, $col1, $col2, $col3)) );
|
||||
like $csr_a->errstr, '/bind_columns called with 4 values but 3 are needed/', 'errstr should contain error message';
|
||||
|
||||
ok( $csr_a->bind_col(2, undef, { foo => 42 }) );
|
||||
ok ! eval { $csr_a->bind_col(0, undef) };
|
||||
like $@, '/bind_col: column 0 is not a valid column \(1..3\)/', 'errstr should contain error message';
|
||||
ok ! eval { $csr_a->bind_col(4, undef) };
|
||||
like $@, '/bind_col: column 4 is not a valid column \(1..3\)/', 'errstr should contain error message';
|
||||
|
||||
ok($csr_b->bind_param(1, $dir));
|
||||
ok($csr_b->execute());
|
||||
@row_b = @{ $csr_b->fetchrow_arrayref };
|
||||
ok(@row_b);
|
||||
|
||||
ok("@row_a" eq "@row_b");
|
||||
@row_b = $csr_b->fetchrow_array;
|
||||
ok("@row_a" ne "@row_b");
|
||||
|
||||
ok($csr_a->finish);
|
||||
ok($csr_b->finish);
|
||||
|
||||
$csr_a = undef; # force destruction of this cursor now
|
||||
ok(1);
|
||||
|
||||
print "fetchrow_hashref('NAME_uc')\n";
|
||||
ok($csr_b->execute());
|
||||
my $row_b = $csr_b->fetchrow_hashref('NAME_uc');
|
||||
ok($row_b);
|
||||
ok($row_b->{MODE} == $row_a[0]);
|
||||
ok($row_b->{SIZE} == $row_a[1]);
|
||||
ok($row_b->{NAME} eq $row_a[2]);
|
||||
|
||||
print "fetchrow_hashref('ParamValues')\n";
|
||||
ok($csr_b->execute());
|
||||
ok(!defined eval { $csr_b->fetchrow_hashref('ParamValues') } ); # PurePerl croaks
|
||||
|
||||
print "FetchHashKeyName\n";
|
||||
ok($csr_b->execute());
|
||||
$row_b = $csr_b->fetchrow_hashref();
|
||||
ok($row_b);
|
||||
ok(keys(%$row_b) == 3);
|
||||
ok($row_b->{MODE} == $row_a[0]);
|
||||
ok($row_b->{SIZE} == $row_a[1]);
|
||||
ok($row_b->{NAME} eq $row_a[2]);
|
||||
|
||||
print "fetchall_arrayref\n";
|
||||
ok($csr_b->execute());
|
||||
$r = $csr_b->fetchall_arrayref;
|
||||
ok($r);
|
||||
ok(@$r);
|
||||
ok($r->[0]->[0] == $row_a[0]);
|
||||
ok($r->[0]->[1] == $row_a[1]);
|
||||
ok($r->[0]->[2] eq $row_a[2]);
|
||||
|
||||
print "fetchall_arrayref array slice\n";
|
||||
ok($csr_b->execute());
|
||||
$r = $csr_b->fetchall_arrayref([2,1]);
|
||||
ok($r && @$r);
|
||||
ok($r->[0]->[1] == $row_a[1]);
|
||||
ok($r->[0]->[0] eq $row_a[2]);
|
||||
|
||||
print "fetchall_arrayref hash slice\n";
|
||||
ok($csr_b->execute());
|
||||
$r = $csr_b->fetchall_arrayref({ SizE=>1, nAMe=>1});
|
||||
ok($r && @$r);
|
||||
ok($r->[0]->{SizE} == $row_a[1]);
|
||||
ok($r->[0]->{nAMe} eq $row_a[2]);
|
||||
|
||||
ok ! $csr_b->fetchall_arrayref({ NoneSuch=>1 });
|
||||
like $DBI::errstr, qr/Invalid column name/;
|
||||
|
||||
print "fetchall_arrayref renaming hash slice\n";
|
||||
ok($csr_b->execute());
|
||||
$r = $csr_b->fetchall_arrayref(\{ 1 => "Koko", 2 => "Nimi"});
|
||||
ok($r && @$r);
|
||||
ok($r->[0]->{Koko} == $row_a[1]);
|
||||
ok($r->[0]->{Nimi} eq $row_a[2]);
|
||||
|
||||
ok ! eval { $csr_b->fetchall_arrayref(\{ 9999 => "Koko" }) };
|
||||
like $@, qr/\Qis not a valid column/;
|
||||
|
||||
print "fetchall_arrayref empty renaming hash slice\n";
|
||||
ok($csr_b->execute());
|
||||
$r = $csr_b->fetchall_arrayref(\{});
|
||||
ok($r && @$r);
|
||||
ok(keys %{$r->[0]} == 0);
|
||||
|
||||
ok($csr_b->execute());
|
||||
ok(!$csr_b->fetchall_arrayref(\[]));
|
||||
like $DBI::errstr, qr/\Qfetchall_arrayref(REF) invalid/;
|
||||
|
||||
print "fetchall_arrayref hash\n";
|
||||
ok($csr_b->execute());
|
||||
$r = $csr_b->fetchall_arrayref({});
|
||||
ok($r);
|
||||
ok(keys %{$r->[0]} == 3);
|
||||
ok("@{$r->[0]}{qw(MODE SIZE NAME)}" eq "@row_a", "'@{$r->[0]}{qw(MODE SIZE NAME)}' ne '@row_a'");
|
||||
|
||||
print "rows()\n"; # assumes previous fetch fetched all rows
|
||||
$rows = $csr_b->rows;
|
||||
ok($rows > 0, "row count $rows");
|
||||
ok($rows == @$r, "$rows vs ".@$r);
|
||||
ok($rows == $DBI::rows, "$rows vs $DBI::rows");
|
||||
|
||||
print "fetchall_arrayref array slice and max rows\n";
|
||||
ok($csr_b->execute());
|
||||
$r = $csr_b->fetchall_arrayref([0], 1);
|
||||
ok($r);
|
||||
is_deeply($r, [[$row_a[0]]]);
|
||||
|
||||
$r = $csr_b->fetchall_arrayref([], 1);
|
||||
is @$r, 1, 'should fetch one row';
|
||||
|
||||
$r = $csr_b->fetchall_arrayref([], 99999);
|
||||
ok @$r, 'should fetch all the remaining rows';
|
||||
|
||||
$r = $csr_b->fetchall_arrayref([], 99999);
|
||||
is $r, undef, 'should return undef as there are no more rows';
|
||||
|
||||
# ---
|
||||
|
||||
print "selectrow_array\n";
|
||||
@row_b = $dbh->selectrow_array($std_sql, undef, $dir);
|
||||
ok(@row_b == 3);
|
||||
ok("@row_b" eq "@row_a");
|
||||
|
||||
print "selectrow_hashref\n";
|
||||
$r = $dbh->selectrow_hashref($std_sql, undef, $dir);
|
||||
ok(keys %$r == 3);
|
||||
ok($r->{MODE} eq $row_a[0]);
|
||||
ok($r->{SIZE} eq $row_a[1]);
|
||||
ok($r->{NAME} eq $row_a[2]);
|
||||
|
||||
print "selectall_arrayref\n";
|
||||
$r = $dbh->selectall_arrayref($std_sql, undef, $dir);
|
||||
ok($r);
|
||||
ok(@{$r->[0]} == 3);
|
||||
ok("@{$r->[0]}" eq "@row_a");
|
||||
ok(@$r == $rows);
|
||||
|
||||
print "selectall_arrayref Slice array slice\n";
|
||||
$r = $dbh->selectall_arrayref($std_sql, { Slice => [ 2, 0 ] }, $dir);
|
||||
ok($r);
|
||||
ok(@{$r->[0]} == 2);
|
||||
ok("@{$r->[0]}" eq "$row_a[2] $row_a[0]", qq{"@{$r->[0]}" eq "$row_a[2] $row_a[0]"});
|
||||
ok(@$r == $rows);
|
||||
|
||||
print "selectall_arrayref Columns array slice\n";
|
||||
$r = $dbh->selectall_arrayref($std_sql, { Columns => [ 3, 1 ] }, $dir);
|
||||
ok($r);
|
||||
ok(@{$r->[0]} == 2);
|
||||
ok("@{$r->[0]}" eq "$row_a[2] $row_a[0]", qq{"@{$r->[0]}" eq "$row_a[2] $row_a[0]"});
|
||||
ok(@$r == $rows);
|
||||
|
||||
print "selectall_arrayref hash slice\n";
|
||||
$r = $dbh->selectall_arrayref($std_sql, { Columns => { MoDe=>1, NamE=>1 } }, $dir);
|
||||
ok($r);
|
||||
ok(keys %{$r->[0]} == 2);
|
||||
ok(exists $r->[0]{MoDe});
|
||||
ok(exists $r->[0]{NamE});
|
||||
ok($r->[0]{MoDe} eq $row_a[0]);
|
||||
ok($r->[0]{NamE} eq $row_a[2]);
|
||||
ok(@$r == $rows);
|
||||
|
||||
print "selectall_array\n";
|
||||
$r = [ $dbh->selectall_array($std_sql, undef, $dir) ];
|
||||
ok($r);
|
||||
ok(@{$r->[0]} == 3);
|
||||
ok("@{$r->[0]}" eq "@row_a");
|
||||
ok(@$r == $rows);
|
||||
|
||||
print "selectall_hashref\n";
|
||||
$r = $dbh->selectall_hashref($std_sql, 'NAME', undef, $dir);
|
||||
ok($r, "selectall_hashref result");
|
||||
is(ref $r, 'HASH', "selectall_hashref HASH: ".ref $r);
|
||||
is(scalar keys %$r, $rows);
|
||||
is($r->{ $row_a[2] }{SIZE}, $row_a[1], qq{$r->{ $row_a[2] }{SIZE} eq $row_a[1]});
|
||||
|
||||
print "selectall_hashref by column number\n";
|
||||
$r = $dbh->selectall_hashref($std_sql, 3, undef, $dir);
|
||||
ok($r);
|
||||
ok($r->{ $row_a[2] }{SIZE} eq $row_a[1], qq{$r->{ $row_a[2] }{SIZE} eq $row_a[1]});
|
||||
|
||||
print "selectcol_arrayref\n";
|
||||
$r = $dbh->selectcol_arrayref($std_sql, undef, $dir);
|
||||
ok($r);
|
||||
ok(@$r == $rows);
|
||||
ok($r->[0] eq $row_b[0]);
|
||||
|
||||
print "selectcol_arrayref column slice\n";
|
||||
$r = $dbh->selectcol_arrayref($std_sql, { Columns => [3,2] }, $dir);
|
||||
ok($r);
|
||||
# warn Dumper([\@row_b, $r]);
|
||||
ok(@$r == $rows * 2);
|
||||
ok($r->[0] eq $row_b[2]);
|
||||
ok($r->[1] eq $row_b[1]);
|
||||
|
||||
# ---
|
||||
|
||||
print "others...\n";
|
||||
my $csr_c;
|
||||
$csr_c = $dbh->prepare("select unknown_field_name1 from ?");
|
||||
ok(!defined $csr_c);
|
||||
ok($DBI::errstr =~ m/Unknown field names: unknown_field_name1/);
|
||||
|
||||
print "RaiseError & PrintError & ShowErrorStatement\n";
|
||||
$dbh->{RaiseError} = 1;
|
||||
ok($dbh->{RaiseError});
|
||||
$dbh->{ShowErrorStatement} = 1;
|
||||
ok($dbh->{ShowErrorStatement});
|
||||
|
||||
my $error_sql = "select unknown_field_name2 from ?";
|
||||
|
||||
ok(! eval { $csr_c = $dbh->prepare($error_sql); 1; });
|
||||
#print "$@\n";
|
||||
like $@, qr/\Q$error_sql/; # ShowErrorStatement
|
||||
like $@, qr/Unknown field names: unknown_field_name2/;
|
||||
|
||||
# check attributes are inherited
|
||||
my $se_sth1 = $dbh->prepare("select mode from ?");
|
||||
ok($se_sth1->{RaiseError});
|
||||
ok($se_sth1->{ShowErrorStatement});
|
||||
|
||||
# check ShowErrorStatement ParamValues are included and sorted
|
||||
$se_sth1->bind_param($_, "val$_") for (1..11);
|
||||
ok( !eval { $se_sth1->execute } );
|
||||
like $@, qr/\[for Statement "select mode from \?" with ParamValues: 1='val1', 2='val2', 3='val3', 4='val4', 5='val5', 6='val6', 7='val7', 8='val8', 9='val9', 10='val10', 11='val11'\]/;
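# (with ShowErrorStatement set, the error message carries a
# '[for Statement "..." with ParamValues: ...]' suffix listing the bound
# parameters in numeric order)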
|
||||
|
||||
# this test relies on the fact that ShowErrorStatement is set above
|
||||
TODO: {
|
||||
local $TODO = "rt66127 not fixed yet";
|
||||
eval {
|
||||
local $se_sth1->{PrintError} = 0;
|
||||
$se_sth1->execute(1,2);
|
||||
};
|
||||
unlike($@, qr/ParamValues:/, 'error string does not contain ParamValues');
|
||||
is($se_sth1->{ParamValues}, undef, 'ParamValues is empty')
|
||||
or diag(Dumper($se_sth1->{ParamValues}));
|
||||
};
|
||||
# check that $dbh->{Statement} tracks last _executed_ sth
|
||||
$se_sth1 = $dbh->prepare("select mode from ?");
|
||||
ok($se_sth1->{Statement} eq "select mode from ?");
|
||||
ok($dbh->{Statement} eq "select mode from ?") or print "got: $dbh->{Statement}\n";
|
||||
my $se_sth2 = $dbh->prepare("select name from ?");
|
||||
ok($se_sth2->{Statement} eq "select name from ?");
|
||||
ok($dbh->{Statement} eq "select name from ?");
|
||||
$se_sth1->execute('.');
|
||||
ok($dbh->{Statement} eq "select mode from ?");
|
||||
|
||||
# show error param values
|
||||
ok(! eval { $se_sth1->execute('first','second') }); # too many params
|
||||
ok($@ =~ /\b1='first'/, $@);
|
||||
ok($@ =~ /\b2='second'/, $@);
|
||||
|
||||
$se_sth1->finish;
|
||||
$se_sth2->finish;
|
||||
|
||||
$dbh->{RaiseError} = 0;
|
||||
ok(!$dbh->{RaiseError});
|
||||
$dbh->{ShowErrorStatement} = 0;
|
||||
ok(!$dbh->{ShowErrorStatement});
|
||||
|
||||
{
|
||||
my @warn;
|
||||
local($SIG{__WARN__}) = sub { push @warn, @_ };
|
||||
$dbh->{PrintError} = 1;
|
||||
ok($dbh->{PrintError});
|
||||
ok(! $dbh->selectall_arrayref("select unknown_field_name3 from ?"));
|
||||
ok("@warn" =~ m/Unknown field names: unknown_field_name3/);
|
||||
$dbh->{PrintError} = 0;
|
||||
ok(!$dbh->{PrintError});
|
||||
}
|
||||
|
||||
|
||||
print "HandleError\n";
|
||||
my $HandleErrorReturn;
|
||||
my $HandleError = sub {
|
||||
my $msg = sprintf "HandleError: %s [h=%s, rv=%s, #=%d]",
|
||||
$_[0],$_[1],(defined($_[2])?$_[2]:'undef'),scalar(@_);
|
||||
die $msg if $HandleErrorReturn < 0;
|
||||
print "$msg\n";
|
||||
$_[2] = 42 if $HandleErrorReturn == 2;
|
||||
return $HandleErrorReturn;
|
||||
};
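# The tests below exercise the HandleError return conventions:
#   die()        - the exception propagates to the caller
#   return false - DBI falls through to RaiseError/PrintError as usual
#   return true  - the error is treated as handled; the method returns its
#                  original result, or $_[2] if the callback modified it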
|
||||
|
||||
$dbh->{HandleError} = $HandleError;
|
||||
ok($dbh->{HandleError});
|
||||
ok($dbh->{HandleError} == $HandleError);
|
||||
|
||||
$dbh->{RaiseError} = 1;
|
||||
$dbh->{PrintError} = 0;
|
||||
$error_sql = "select unknown_field_name2 from ?";
|
||||
|
||||
print "HandleError -> die\n";
|
||||
$HandleErrorReturn = -1;
|
||||
ok(! eval { $csr_c = $dbh->prepare($error_sql); 1; });
|
||||
ok($@ =~ m/^HandleError:/, $@);
|
||||
|
||||
print "HandleError -> 0 -> RaiseError\n";
|
||||
$HandleErrorReturn = 0;
|
||||
ok(! eval { $csr_c = $dbh->prepare($error_sql); 1; });
|
||||
ok($@ =~ m/^DBD::(ExampleP|Multiplex|Gofer)::db prepare failed:/, $@);
|
||||
|
||||
print "HandleError -> 1 -> return (original)undef\n";
|
||||
$HandleErrorReturn = 1;
|
||||
$r = eval { $csr_c = $dbh->prepare($error_sql); };
|
||||
ok(!$@, $@);
|
||||
ok(!defined($r), $r);
|
||||
|
||||
print "HandleError -> 2 -> return (modified)42\n";
|
||||
$HandleErrorReturn = 2;
|
||||
$r = eval { $csr_c = $dbh->prepare($error_sql); };
|
||||
ok(!$@, $@);
|
||||
ok($r==42) unless $dbh->{mx_handle_list} && ok(1); # skip for Multiplex
|
||||
|
||||
$dbh->{HandleError} = undef;
|
||||
ok(!$dbh->{HandleError});
|
||||
|
||||
{
|
||||
# dump_results;
|
||||
my $sth = $dbh->prepare($std_sql);
|
||||
|
||||
isa_ok($sth, "DBI::st");
|
||||
|
||||
if (length(File::Spec->updir)) {
|
||||
ok($sth->execute(File::Spec->updir));
|
||||
} else {
|
||||
ok($sth->execute('../'));
|
||||
}
|
||||
|
||||
my $dump_file = "dumpcsr.tst.$$";
|
||||
SKIP: {
|
||||
skip "# dump_results test skipped: unable to open $dump_file: $!\n", 4
|
||||
unless open(DUMP_RESULTS, ">$dump_file");
|
||||
ok($sth->dump_results("10", "\n", ",\t", \*DUMP_RESULTS));
|
||||
close(DUMP_RESULTS) or warn "close $dump_file: $!";
|
||||
ok(-s $dump_file > 0);
|
||||
is( unlink( $dump_file ), 1, "Remove $dump_file" );
|
||||
ok( !-e $dump_file, "Actually gone" );
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
note "table_info\n";
|
||||
# First generate a list of all subdirectories
|
||||
$dir = File::Basename::dirname( $INC{"DBI.pm"} );
|
||||
my $dh;
|
||||
ok(opendir($dh, $dir));
|
||||
my(%dirs, %unexpected, %missing);
|
||||
while (defined(my $file = readdir($dh))) {
|
||||
$dirs{$file} = 1 if -d File::Spec->catdir($dir,$file);
|
||||
}
|
||||
note( "Local $dir subdirs: @{[ keys %dirs ]}" );
|
||||
closedir($dh);
|
||||
my $sth = $dbh->table_info($dir, undef, "%", "TABLE");
|
||||
ok($sth);
|
||||
%unexpected = %dirs;
|
||||
%missing = ();
|
||||
while (my $ref = $sth->fetchrow_hashref()) {
|
||||
if (exists($unexpected{$ref->{'TABLE_NAME'}})) {
|
||||
delete $unexpected{$ref->{'TABLE_NAME'}};
|
||||
} else {
|
||||
$missing{$ref->{'TABLE_NAME'}} = 1;
|
||||
}
|
||||
}
|
||||
ok(keys %unexpected == 0)
|
||||
or diag "Unexpected directories: ", join(",", keys %unexpected), "\n";
|
||||
ok(keys %missing == 0)
|
||||
or diag "Missing directories: ", join(",", keys %missing), "\n";
|
||||
|
||||
note "tables\n";
|
||||
my @tables_expected = (
|
||||
q{"schema"."table"},
|
||||
q{"sch-ema"."table"},
|
||||
q{"schema"."ta-ble"},
|
||||
q{"sch ema"."table"},
|
||||
q{"schema"."ta ble"},
|
||||
);
|
||||
my @tables = $dbh->tables(undef, undef, "%", "VIEW");
|
||||
ok(@tables == @tables_expected, "Table count mismatch: ".@tables_expected." vs ".@tables);
|
||||
ok($tables[$_] eq $tables_expected[$_], "$tables[$_] ne $tables_expected[$_]")
|
||||
foreach (0..$#tables_expected);
|
||||
|
||||
for (my $i = 0; $i < 300; $i += 100) {
|
||||
note "Testing the fake directories ($i).\n";
|
||||
ok($csr_a = $dbh->prepare("SELECT name, mode FROM long_list_$i"));
|
||||
ok($csr_a->execute(), $DBI::errstr);
|
||||
my $ary = $csr_a->fetchall_arrayref;
|
||||
ok(@$ary == $i, @$ary." rows instead of $i");
|
||||
if ($i) {
|
||||
my @n1 = map { $_->[0] } @$ary;
|
||||
my @n2 = reverse map { "file$_" } 1..$i;
|
||||
ok("@n1" eq "@n2", "'@n1' ne '@n2'");
|
||||
}
|
||||
else {
|
||||
ok(1);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
SKIP: {
|
||||
skip "test not tested with Multiplex", 1
|
||||
if $dbh->{mx_handle_list};
|
||||
note "Testing \$dbh->func().\n";
|
||||
my %tables;
|
||||
%tables = map { $_ =~ /lib/ ? ($_, 1) : () } $dbh->tables();
|
||||
my @func_tables = $dbh->func('lib', 'examplep_tables');
|
||||
foreach my $t (@func_tables) {
|
||||
defined(delete $tables{$t}) or print "Unexpected table: $t\n";
|
||||
}
|
||||
is(keys(%tables), 0);
|
||||
}
|
||||
|
||||
{
|
||||
# some tests on special cases for the older tables call
|
||||
# uses DBD::NullP and relies on 2 facts about DBD::NullP:
|
||||
# 1) it has a get_info for 29 - the quote char
|
||||
# 2) it has a table_info which returns some types and catalogs
|
||||
my $dbhnp = DBI->connect('dbi:NullP:test');
|
||||
|
||||
# this special case should just return a list of table types
|
||||
my @types = $dbhnp->tables('','','','%');
|
||||
ok(scalar(@types), 'we got some table types');
|
||||
my $defined = grep {defined($_)} @types;
|
||||
is($defined, scalar(@types), 'all table types are defined');
|
||||
SKIP: {
|
||||
skip "some table types were not defined", 1 if ($defined != scalar(@types));
|
||||
my $found_sep = grep {$_ =~ '\.'} @types;
|
||||
is($found_sep, 0, 'no name separators in table types') or diag(Dumper(\@types));
|
||||
};
|
||||
|
||||
# this special case should just return a list of catalogs
|
||||
my @catalogs = $dbhnp->tables('%', '', '');
|
||||
ok(scalar(@catalogs), 'we got some catalogs');
|
||||
SKIP: {
|
||||
skip "no catalogs found", 1 if !scalar(@catalogs);
|
||||
my $found_sep = grep {$_ =~ '\.'} @catalogs;
|
||||
is($found_sep, 0, 'no name separators in catalogs') or diag(Dumper(\@catalogs));
|
||||
};
|
||||
$dbhnp->disconnect;
|
||||
}
|
||||
|
||||
$dbh->disconnect;
|
||||
ok(!$dbh->{Active});
|
||||
ok(!$dbh->ping, "ping should return false after disconnect");
|
||||
|
||||
1;
|
|
@ -0,0 +1,124 @@
|
|||
#!perl -w
|
||||
# vim:ts=8:sw=4
|
||||
$|=1;
|
||||
|
||||
use strict;
|
||||
|
||||
use Test::More;
|
||||
use DBI;
|
||||
use Storable qw(dclone);
|
||||
use Data::Dumper;
|
||||
|
||||
$Data::Dumper::Indent = 1;
|
||||
$Data::Dumper::Sortkeys = 1;
|
||||
$Data::Dumper::Quotekeys = 0;
|
||||
|
||||
plan tests => 24;
|
||||
|
||||
my $dbh = DBI->connect("dbi:Sponge:foo","","", {
|
||||
PrintError => 0,
|
||||
RaiseError => 1,
|
||||
});
|
||||
|
||||
my $source_rows = [ # data for DBD::Sponge to return via fetch
|
||||
[ 41, "AAA", 9 ],
|
||||
[ 41, "BBB", 9 ],
|
||||
[ 42, "BBB", undef ],
|
||||
[ 43, "ccc", 7 ],
|
||||
[ 44, "DDD", 6 ],
|
||||
];
|
||||
|
||||
sub go {
|
||||
my $source = shift || $source_rows;
|
||||
my $sth = $dbh->prepare("foo", {
|
||||
rows => dclone($source),
|
||||
NAME => [ qw(C1 C2 C3) ],
|
||||
});
|
||||
ok($sth->execute(), $DBI::errstr);
|
||||
return $sth;
|
||||
}
|
||||
|
||||
my($sth, $col0, $col1, $col2, $rows);
|
||||
|
||||
# --- fetchrow_arrayref
|
||||
# --- fetchrow_array
|
||||
# etc etc
|
||||
|
||||
# --- fetchall_hashref
|
||||
my @fetchall_hashref_results = ( # single keys
|
||||
C1 => {
|
||||
41 => { C1 => 41, C2 => 'BBB', C3 => 9 },
|
||||
42 => { C1 => 42, C2 => 'BBB', C3 => undef },
|
||||
43 => { C1 => 43, C2 => 'ccc', C3 => 7 },
|
||||
44 => { C1 => 44, C2 => 'DDD', C3 => 6 }
|
||||
},
|
||||
C2 => {
|
||||
AAA => { C1 => 41, C2 => 'AAA', C3 => 9 },
|
||||
BBB => { C1 => 42, C2 => 'BBB', C3 => undef },
|
||||
DDD => { C1 => 44, C2 => 'DDD', C3 => 6 },
|
||||
ccc => { C1 => 43, C2 => 'ccc', C3 => 7 }
|
||||
},
|
||||
[ 'C2' ] => { # single key within arrayref
|
||||
AAA => { C1 => 41, C2 => 'AAA', C3 => 9 },
|
||||
BBB => { C1 => 42, C2 => 'BBB', C3 => undef },
|
||||
DDD => { C1 => 44, C2 => 'DDD', C3 => 6 },
|
||||
ccc => { C1 => 43, C2 => 'ccc', C3 => 7 }
|
||||
},
|
||||
);
|
||||
push @fetchall_hashref_results, ( # multiple keys
|
||||
[ 'C1', 'C2' ] => {
|
||||
'41' => {
|
||||
AAA => { C1 => '41', C2 => 'AAA', C3 => 9 },
|
||||
BBB => { C1 => '41', C2 => 'BBB', C3 => 9 }
|
||||
},
|
||||
'42' => {
|
||||
BBB => { C1 => '42', C2 => 'BBB', C3 => undef }
|
||||
},
|
||||
'43' => {
|
||||
ccc => { C1 => '43', C2 => 'ccc', C3 => 7 }
|
||||
},
|
||||
'44' => {
|
||||
DDD => { C1 => '44', C2 => 'DDD', C3 => 6 }
|
||||
}
|
||||
},
|
||||
);
|
||||
|
||||
my %dump;
|
||||
|
||||
while (my $keyfield = shift @fetchall_hashref_results) {
|
||||
my $expected = shift @fetchall_hashref_results;
|
||||
my $k = (ref $keyfield) ? "[@$keyfield]" : $keyfield;
|
||||
print "# fetchall_hashref($k)\n";
|
||||
ok($sth = go());
|
||||
my $result = $sth->fetchall_hashref($keyfield);
|
||||
ok($result);
|
||||
is_deeply($result, $expected);
|
||||
# $dump{$k} = dclone $result; # just for adding tests
|
||||
}
|
||||
|
||||
warn Dumper \%dump if %dump;
|
||||
|
||||
# test assignment to NUM_OF_FIELDS automatically alters the row buffer
|
||||
$sth = go();
|
||||
my $row = $sth->fetchrow_arrayref;
|
||||
is scalar @$row, 3;
|
||||
is $sth->{NUM_OF_FIELDS}, 3;
|
||||
is scalar @{ $sth->_get_fbav }, 3;
|
||||
$sth->{NUM_OF_FIELDS} = 4;
|
||||
is $sth->{NUM_OF_FIELDS}, 4;
|
||||
is scalar @{ $sth->_get_fbav }, 4;
|
||||
$sth->{NUM_OF_FIELDS} = 2;
|
||||
is $sth->{NUM_OF_FIELDS}, 2;
|
||||
is scalar @{ $sth->_get_fbav }, 2;
|
||||
|
||||
$sth->finish;
|
||||
|
||||
|
||||
if (0) {
|
||||
my @perf = map { [ int($_/100), $_, $_ ] } 0..10000;
|
||||
require Benchmark;
|
||||
Benchmark::timethis(10, sub { go(\@perf)->fetchall_hashref([ 'C1','C2','C3' ]) });
|
||||
}
|
||||
|
||||
|
||||
1; # end
|
|
@ -0,0 +1,48 @@
|
|||
#!perl -w
|
||||
|
||||
use lib qw(blib/arch blib/lib); # needed since -T ignores PERL5LIB
|
||||
use strict;
|
||||
|
||||
use Test::More tests => 10;
|
||||
|
||||
use DBI qw(:sql_types);
|
||||
use Config;
|
||||
use Cwd;
|
||||
|
||||
$^W = 1;
|
||||
$| = 1;
|
||||
|
||||
my $dbh = DBI->connect('dbi:ExampleP:', '', '');
|
||||
|
||||
sub check_quote {
|
||||
# checking quote
|
||||
is($dbh->quote("quote's"), "'quote''s'", '... quoting strings with embedded single quotes');
|
||||
is($dbh->quote("42", SQL_VARCHAR), "'42'", '... quoting number as SQL_VARCHAR');
|
||||
is($dbh->quote("42", SQL_INTEGER), "42", '... quoting number as SQL_INTEGER');
|
||||
is($dbh->quote(undef), "NULL", '... quoting undef as NULL');
|
||||
}
|
||||
|
||||
check_quote();
|
||||
|
||||
sub check_quote_identifier {
|
||||
|
||||
is($dbh->quote_identifier('foo'), '"foo"', '... properly quotes foo as "foo"');
|
||||
is($dbh->quote_identifier('f"o'), '"f""o"', '... properly quotes f"o as "f""o"');
|
||||
is($dbh->quote_identifier('foo','bar'), '"foo"."bar"', '... properly quotes foo, bar as "foo"."bar"');
|
||||
is($dbh->quote_identifier(undef,undef,'bar'), '"bar"', '... properly quotes undef, undef, bar as "bar"');
|
||||
|
||||
is($dbh->quote_identifier('foo',undef,'bar'), '"foo"."bar"', '... properly quotes foo, undef, bar as "foo"."bar"');
|
||||
|
||||
SKIP: {
|
||||
skip "Can't test alternate quote_identifier logic with DBI_AUTOPROXY", 1
|
||||
if $ENV{DBI_AUTOPROXY};
|
||||
my $qi = $dbh->{dbi_quote_identifier_cache} || die "test out of date with dbi internals?";
|
||||
$qi->[1] = '@'; # SQL_CATALOG_NAME_SEPARATOR
|
||||
$qi->[2] = 2; # SQL_CATALOG_LOCATION
|
||||
is($dbh->quote_identifier('foo',undef,'bar'), '"bar"@"foo"', '... now quotes it as "bar"@"foo" after flushing cache');
|
||||
}
|
||||
}
|
||||
|
||||
check_quote_identifier();
|
||||
|
||||
1;
|
|
@ -0,0 +1,133 @@
|
|||
#!perl -wT
|
||||
|
||||
use lib qw(blib/arch blib/lib); # needed since -T ignores PERL5LIB
|
||||
use DBI qw(:sql_types);
|
||||
use Config;
|
||||
use Cwd;
|
||||
use strict;
|
||||
|
||||
|
||||
$^W = 1;
|
||||
$| = 1;
|
||||
|
||||
require VMS::Filespec if $^O eq 'VMS';
|
||||
|
||||
use Test::More;
|
||||
|
||||
# Check Taint attribute works. This requires this test to be run
|
||||
# manually with the -T flag: "perl -T -Mblib t/examp.t"
|
||||
sub is_tainted {
|
||||
my $foo;
|
||||
return ! eval { ($foo=join('',@_)), kill 0; 1; };
|
||||
}
|
||||
sub mk_tainted {
|
||||
my $string = shift;
|
||||
return substr($string.$^X, 0, length($string));
|
||||
}
|
||||
|
||||
plan skip_all => "Taint attributes not supported with DBI::PurePerl" if $DBI::PurePerl;
|
||||
plan skip_all => "Taint attribute tests require taint mode (perl -T)" unless is_tainted($^X);
|
||||
plan skip_all => "Taint attribute tests not functional with DBI_AUTOPROXY" if $ENV{DBI_AUTOPROXY};
|
||||
|
||||
plan tests => 36;
|
||||
|
||||
# get a dir always readable on all platforms
|
||||
my $dir = getcwd() || cwd();
|
||||
$dir = VMS::Filespec::unixify($dir) if $^O eq 'VMS';
|
||||
$dir =~ m/(.*)/; $dir = $1 || die; # untaint $dir
|
||||
|
||||
my ($r, $dbh);
|
||||
|
||||
$dbh = DBI->connect('dbi:ExampleP:', '', '', { PrintError=>0, RaiseError=>1, Taint => 1 });
|
||||
|
||||
my $std_sql = "select mode,size,name from ?";
|
||||
my $csr_a = $dbh->prepare($std_sql);
|
||||
ok(ref $csr_a);
|
||||
|
||||
ok($dbh->{'Taint'});
|
||||
ok($dbh->{'TaintIn'} == 1);
|
||||
ok($dbh->{'TaintOut'} == 1);
|
||||
|
||||
$dbh->{'TaintOut'} = 0;
|
||||
ok($dbh->{'Taint'} == 0);
|
||||
ok($dbh->{'TaintIn'} == 1);
|
||||
ok($dbh->{'TaintOut'} == 0);
|
||||
|
||||
$dbh->{'Taint'} = 0;
|
||||
ok($dbh->{'Taint'} == 0);
|
||||
ok($dbh->{'TaintIn'} == 0);
|
||||
ok($dbh->{'TaintOut'} == 0);
|
||||
|
||||
$dbh->{'TaintIn'} = 1;
|
||||
ok($dbh->{'Taint'} == 0);
|
||||
ok($dbh->{'TaintIn'} == 1);
|
||||
ok($dbh->{'TaintOut'} == 0);
|
||||
|
||||
$dbh->{'TaintOut'} = 1;
|
||||
ok($dbh->{'Taint'} == 1);
|
||||
ok($dbh->{'TaintIn'} == 1);
|
||||
ok($dbh->{'TaintOut'} == 1);
|
||||
|
||||
$dbh->{'Taint'} = 0;
|
||||
my $st;
|
||||
eval { $st = $dbh->prepare($std_sql); };
|
||||
ok(ref $st);
|
||||
|
||||
ok($st->{'Taint'} == 0);
|
||||
|
||||
ok($st->execute( $dir ), 'should execute ok');
|
||||
|
||||
my @row = $st->fetchrow_array;
|
||||
ok(@row);
|
||||
|
||||
ok(!is_tainted($row[0]));
|
||||
ok(!is_tainted($row[1]));
|
||||
ok(!is_tainted($row[2]));
|
||||
|
||||
print "TaintIn\n";
|
||||
$st->{'TaintIn'} = 1;
|
||||
|
||||
@row = $st->fetchrow_array;
|
||||
ok(@row);
|
||||
|
||||
ok(!is_tainted($row[0]));
|
||||
ok(!is_tainted($row[1]));
|
||||
ok(!is_tainted($row[2]));
|
||||
|
||||
print "TaintOut\n";
|
||||
$st->{'TaintOut'} = 1;
|
||||
|
||||
@row = $st->fetchrow_array;
|
||||
ok(@row);
|
||||
|
||||
ok(is_tainted($row[0]));
|
||||
ok(is_tainted($row[1]));
|
||||
ok(is_tainted($row[2]));
|
||||
|
||||
$st->finish;
|
||||
|
||||
my $tainted_sql = mk_tainted($std_sql);
|
||||
my $tainted_dot = mk_tainted('.');
|
||||
|
||||
$dbh->{'Taint'} = $csr_a->{'Taint'} = 1;
|
||||
eval { $dbh->prepare($tainted_sql); 1; };
|
||||
ok($@ =~ /Insecure dependency/, $@);
|
||||
eval { $csr_a->execute($tainted_dot); 1; };
|
||||
ok($@ =~ /Insecure dependency/, $@);
|
||||
undef $@;
|
||||
|
||||
$dbh->{'TaintIn'} = $csr_a->{'TaintIn'} = 0;
|
||||
|
||||
eval { $dbh->prepare($tainted_sql); 1; };
|
||||
ok(!$@, $@);
|
||||
eval { $csr_a->execute($tainted_dot); 1; };
|
||||
ok(!$@, $@);
|
||||
|
||||
$csr_a->{Taint} = 0;
|
||||
ok($csr_a->{Taint} == 0);
|
||||
|
||||
$csr_a->finish;
|
||||
|
||||
$dbh->disconnect;
|
||||
|
||||
1;
|
|
@ -0,0 +1,73 @@
|
|||
#!perl -w
|
||||
# vim:ts=8:sw=4
|
||||
$|=1;
|
||||
|
||||
use Test::More;
|
||||
use DBI;
|
||||
|
||||
eval {
|
||||
require Storable;
|
||||
import Storable qw(dclone);
|
||||
require Encode;
|
||||
import Encode qw(_utf8_on _utf8_off is_utf8);
|
||||
};
|
||||
|
||||
plan skip_all => "Unable to load required module ($@)"
|
||||
unless defined &_utf8_on;
|
||||
|
||||
plan tests => 16;
|
||||
|
||||
$dbh = DBI->connect("dbi:Sponge:foo","","", {
|
||||
PrintError => 0,
|
||||
RaiseError => 1,
|
||||
});
|
||||
|
||||
my $source_rows = [ # data for DBD::Sponge to return via fetch
|
||||
[ 41, "AAA", 9 ],
|
||||
[ 42, "BB", undef ],
|
||||
[ 43, undef, 7 ],
|
||||
[ 44, "DDD", 6 ],
|
||||
];
|
||||
|
||||
my($sth, $col0, $col1, $col2, $rows);
|
||||
|
||||
# set utf8 on one of the columns so we can check it carries through into the
|
||||
# keys of fetchrow_hashref
|
||||
my @col_names = qw(Col1 Col2 Col3);
|
||||
_utf8_on($col_names[1]);
|
||||
ok is_utf8($col_names[1]);
|
||||
ok !is_utf8($col_names[0]);
|
||||
|
||||
$sth = $dbh->prepare("foo", {
|
||||
rows => dclone($source_rows),
|
||||
NAME => \@col_names,
|
||||
});
|
||||
|
||||
ok($sth->bind_columns(\($col0, $col1, $col2)) );
|
||||
ok($sth->execute(), $DBI::errstr);
|
||||
|
||||
ok $sth->fetch;
|
||||
cmp_ok $col1, 'eq', "AAA";
|
||||
ok !is_utf8($col1);
|
||||
|
||||
# force utf8 flag on
|
||||
_utf8_on($col1);
|
||||
ok is_utf8($col1);
|
||||
|
||||
ok $sth->fetch;
|
||||
cmp_ok $col1, 'eq', "BB";
|
||||
# XXX sadly this test doesn't detect the problem when using DBD::Sponge
|
||||
# because DBD::Sponge uses $sth->_set_fbav (correctly) and that uses
|
||||
# sv_setsv which doesn't have the utf8 persistence that sv_setpv does.
|
||||
ok !is_utf8($col1); # utf8 flag should have been reset
|
||||
|
||||
ok $sth->fetch;
|
||||
ok !defined $col1; # null
|
||||
ok !is_utf8($col1); # utf8 flag should have been reset
|
||||
|
||||
ok my $hash = $sth->fetchrow_hashref;
|
||||
ok 1 == grep { is_utf8($_) } keys %$hash;
|
||||
|
||||
$sth->finish;
|
||||
|
||||
# end
|
|
@ -0,0 +1,254 @@
|
|||
#!perl -w
|
||||
$|=1;
|
||||
|
||||
use strict;
|
||||
|
||||
use Test::More tests => 55;
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
## 15array.t
|
||||
## ----------------------------------------------------------------------------
|
||||
#
|
||||
## ----------------------------------------------------------------------------
|
||||
|
||||
BEGIN {
|
||||
use_ok('DBI');
|
||||
}
|
||||
|
||||
# create a database handle
|
||||
my $dbh = DBI->connect("dbi:Sponge:dummy", '', '', {
|
||||
RaiseError => 1,
|
||||
ShowErrorStatement => 1,
|
||||
AutoCommit => 1
|
||||
});
|
||||
|
||||
# check that our db handle is good
|
||||
isa_ok($dbh, "DBI::db");
|
||||
|
||||
my $rv;
|
||||
my $rows = [];
|
||||
my $tuple_status = [];
|
||||
my $dumped;
|
||||
|
||||
my $sth = $dbh->prepare("insert", {
|
||||
rows => $rows, # where to 'insert' (push) the rows
|
||||
NUM_OF_PARAMS => 4,
|
||||
execute_hook => sub { # DBD::Sponge hook to make certain data trigger an error for that row
|
||||
local $^W;
|
||||
return $_[0]->set_err(1,"errmsg") if grep { $_ and $_ eq "B" } @_;
|
||||
return 1;
|
||||
}
|
||||
});
|
||||
|
||||
isa_ok($sth, "DBI::st");
|
||||
|
||||
cmp_ok(scalar @{$rows}, '==', 0, '... we should have 0 rows');
|
||||
|
||||
# -----------------------------------------------
|
||||
|
||||
ok(! eval {
|
||||
local $sth->{PrintError} = 0;
|
||||
$sth->execute_array(
|
||||
{
|
||||
ArrayTupleStatus => $tuple_status
|
||||
},
|
||||
[ 1, 2, 3 ], # array of integers
|
||||
42, # scalar 42 treated as array of 42's
|
||||
undef, # scalar undef treated as array of undef's
|
||||
[ qw(A B C) ], # array of strings
|
||||
) },
|
||||
'... execute_array should return false'
|
||||
);
|
||||
ok $@, 'execute_array failure with RaiseError should have died';
|
||||
like $sth->errstr, '/executing 3 generated 1 errors/';
|
||||
|
||||
cmp_ok(scalar @{$rows}, '==', 2, '... we should have 2 rows');
|
||||
cmp_ok(scalar @{$tuple_status}, '==', 3, '... we should have 3 tuple_status');
|
||||
|
||||
ok(eq_array(
|
||||
$rows,
|
||||
[ [1, 42, undef, 'A'], [3, 42, undef, 'C'] ]
|
||||
),
|
||||
'... our rows are as expected');
|
||||
|
||||
ok(eq_array(
|
||||
$tuple_status,
|
||||
[1, [1, 'errmsg', 'S1000'], 1]
|
||||
),
|
||||
'... our tuple_status is as expected');
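# (each ArrayTupleStatus element is a true value for a tuple that executed
# OK, or an arrayref of [ $err, $errstr, $state ] for one that failed)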
|
||||
|
||||
# -----------------------------------------------
|
||||
# --- change one param and re-execute
|
||||
|
||||
@$rows = ();
|
||||
ok( $sth->bind_param_array(4, [ qw(a b c) ]), '... bind_param_array should return true');
|
||||
ok( $sth->execute_array({ ArrayTupleStatus => $tuple_status }), '... execute_array should return true');
|
||||
|
||||
cmp_ok(scalar @{$rows}, '==', 3, '... we should have 3 rows');
|
||||
cmp_ok(scalar @{$tuple_status}, '==', 3, '... we should have 3 tuple_status');
|
||||
|
||||
ok(eq_array(
|
||||
$rows,
|
||||
[ [1, 42, undef, 'a'], [2, 42, undef, 'b'], [3, 42, undef, 'c'] ]
|
||||
),
|
||||
'... our rows are as expected');
|
||||
|
||||
ok(eq_array(
|
||||
$tuple_status,
|
||||
[1, 1, 1]
|
||||
),
|
||||
'... our tuple_status is as expected');
|
||||
|
||||
# -----------------------------------------------
|
||||
# --- call execute_array in array context to get executed AND affected
|
||||
@$rows = ();
|
||||
my ($executed, $affected) = $sth->execute_array({ ArrayTupleStatus => $tuple_status });
|
||||
ok($executed, '... execute_array should return true');
|
||||
cmp_ok($executed, '==', 3, '... we should have executed 3 rows');
|
||||
cmp_ok($affected, '==', 3, '... we should have affected 3 rows');
|
||||
|
||||
# -----------------------------------------------
|
||||
# --- with no values for bind params, should execute zero times
|
||||
|
||||
@$rows = ();
|
||||
$rv = $sth->execute_array( { ArrayTupleStatus => $tuple_status }, [], [], [], []);
|
||||
ok($rv, '... execute_array should return true');
|
||||
ok(!($rv+0), '... execute_array should return 0 (but true)');
|
||||
|
||||
cmp_ok(scalar @{$rows}, '==', 0, '... we should have 0 rows');
|
||||
cmp_ok(scalar @{$tuple_status}, '==', 0,'... we should have 0 tuple_status');
|
||||
|
||||
# -----------------------------------------------
|
||||
# --- with only scalar values for bind params, should execute just once
|
||||
|
||||
@$rows = ();
|
||||
$rv = $sth->execute_array( { ArrayTupleStatus => $tuple_status }, 5, 6, 7, 8);
|
||||
cmp_ok($rv, '==', 1, '... execute_array should return 1');
|
||||
|
||||
cmp_ok(scalar @{$rows}, '==', 1, '... we should have 1 rows');
|
||||
ok(eq_array( $rows, [ [5,6,7,8] ]), '... our rows are as expected');
|
||||
cmp_ok(scalar @{$tuple_status}, '==', 1,'... we should have 1 tuple_status');
|
||||
ok(eq_array( $tuple_status, [1]), '... our tuple_status is as expected');
|
||||
|
||||
# -----------------------------------------------
|
||||
# --- with mix of scalar values and arrays only arrays control tuples
|
||||
|
||||
@$rows = ();
|
||||
$rv = $sth->execute_array( { ArrayTupleStatus => $tuple_status }, 5, [], 7, 8);
|
||||
cmp_ok($rv, '==', 0, '... execute_array should return 0');
|
||||
|
||||
cmp_ok(scalar @{$rows}, '==', 0, '... we should have 0 rows');
|
||||
cmp_ok(scalar @{$tuple_status}, '==', 0,'... we should have 0 tuple_status');
|
||||
|
||||
# -----------------------------------------------
|
||||
# --- catch 'undefined value' bug with zero bind values
|
||||
|
||||
@$rows = ();
|
||||
my $sth_other = $dbh->prepare("insert", {
|
||||
rows => $rows, # where to 'insert' (push) the rows
|
||||
NUM_OF_PARAMS => 1,
|
||||
});
|
||||
|
||||
isa_ok($sth_other, "DBI::st");
|
||||
|
||||
$rv = $sth_other->execute_array( {}, [] );
|
||||
ok($rv, '... execute_array should return true');
|
||||
ok(!($rv+0), '... execute_array should return 0 (but true)');
|
||||
# no ArrayTupleStatus
|
||||
|
||||
cmp_ok(scalar @{$rows}, '==', 0, '... we should have 0 rows');
|
||||
|
||||
# -----------------------------------------------
|
||||
# --- ArrayTupleFetch code-ref tests ---
|
||||
|
||||
my $index = 0;
|
||||
|
||||
my $fetchrow = sub { # generate 5 rows of two integer values
|
||||
return if $index >= 2;
|
||||
$index +=1;
|
||||
# There doesn't seem to be any reliable way to force $index to be
|
||||
# treated as a string (and so dumped as such). We just have to
|
||||
# make the test case allow either 1 or '1'.
|
||||
return [ $index, 'a','b','c' ];
|
||||
};
|
||||
|
||||
@$rows = ();
|
||||
ok( $sth->execute_array({
|
||||
ArrayTupleFetch => $fetchrow,
|
||||
ArrayTupleStatus => $tuple_status
|
||||
}), '... execute_array should return true');
|
||||
|
||||
cmp_ok(scalar @{$rows}, '==', 2, '... we should have 2 rows');
|
||||
cmp_ok(scalar @{$tuple_status}, '==', 2, '... we should have 2 tuple_status');
|
||||
|
||||
ok(eq_array(
|
||||
$rows,
|
||||
[ [1, 'a', 'b', 'c'], [2, 'a', 'b', 'c'] ]
|
||||
),
|
||||
'... rows should match'
|
||||
);
|
||||
|
||||
ok(eq_array(
|
||||
$tuple_status,
|
||||
[1, 1]
|
||||
),
|
||||
'... tuple_status should match'
|
||||
);
|
||||
|
||||
# -----------------------------------------------
|
||||
# --- ArrayTupleFetch sth tests ---
|
||||
|
||||
my $fetch_sth = $dbh->prepare("foo", {
|
||||
rows => [ map { [ $_,'x','y','z' ] } 7..9 ],
|
||||
NUM_OF_FIELDS => 4
|
||||
});
|
||||
|
||||
isa_ok($fetch_sth, "DBI::st");
|
||||
|
||||
$fetch_sth->execute();
|
||||
|
||||
@$rows = ();
|
||||
|
||||
ok( $sth->execute_array({
|
||||
ArrayTupleFetch => $fetch_sth,
|
||||
ArrayTupleStatus => $tuple_status,
|
||||
}), '... execute_array should return true');
|
||||
|
||||
cmp_ok(scalar @{$rows}, '==', 3, '... we should have 3 rows');
|
||||
cmp_ok(scalar @{$tuple_status}, '==', 3, '... we should have 3 tuple_status');
|
||||
|
||||
ok(eq_array(
|
||||
$rows,
|
||||
[ [7, 'x', 'y', 'z'], [8, 'x', 'y', 'z'], [9, 'x', 'y', 'z'] ]
|
||||
),
|
||||
'... rows should match'
|
||||
);
|
||||
|
||||
ok(eq_array(
|
||||
$tuple_status,
|
||||
[1, 1, 1]
|
||||
),
|
||||
'... tuple status should match'
|
||||
);
|
||||
|
||||
# -----------------------------------------------
|
||||
# --- error detection tests ---
|
||||
|
||||
$sth->{RaiseError} = 0;
|
||||
$sth->{PrintError} = 0;
|
||||
|
||||
ok(!defined $sth->execute_array( { ArrayTupleStatus => $tuple_status }, [1],[2]), '... execute_array should return undef');
|
||||
is($sth->errstr, '2 bind values supplied but 4 expected', '... errstr is as expected');
|
||||
|
||||
ok(!defined $sth->execute_array( { ArrayTupleStatus => { } }, [ 1, 2, 3 ]), '... execute_array should return undef');
|
||||
is( $sth->errstr, 'ArrayTupleStatus attribute must be an arrayref', '... errstr is as expected');
|
||||
|
||||
ok(!defined $sth->execute_array( { ArrayTupleStatus => $tuple_status }, 1,{},3,4), '... execute_array should return undef');
|
||||
is( $sth->errstr, 'Value for parameter 2 must be a scalar or an arrayref, not a HASH', '... errstr is as expected');
|
||||
|
||||
ok(!defined $sth->bind_param_array(":foo", [ qw(a b c) ]), '... bind_param_array should return undef');
|
||||
is( $sth->errstr, "Can't use named placeholder ':foo' for non-driver supported bind_param_array", '... errstr is as expected');
|
||||
|
||||
$dbh->disconnect;
|
||||
|
||||
1;
|
|
@ -0,0 +1,148 @@
|
|||
#!perl -w
|
||||
|
||||
use strict;
|
||||
|
||||
use Test::More tests => 20; # use explicit plan to avoid race hazard
|
||||
|
||||
BEGIN{ use_ok( 'DBI' ) }
|
||||
|
||||
my $expect_active;
|
||||
|
||||
## main Test Driver Package
|
||||
{
|
||||
package DBD::Test;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
my $drh = undef;
|
||||
|
||||
sub driver {
|
||||
return $drh if $drh;
|
||||
my ($class, $attr) = @_;
|
||||
$class = "${class}::dr";
|
||||
($drh) = DBI::_new_drh($class, {
|
||||
Name => 'Test',
|
||||
Version => '1.0',
|
||||
}, 77 );
|
||||
return $drh;
|
||||
}
|
||||
|
||||
sub CLONE { undef $drh }
|
||||
}
|
||||
|
||||
## Test Driver
|
||||
{
|
||||
package DBD::Test::dr;
|
||||
|
||||
use warnings;
|
||||
use Test::More;
|
||||
|
||||
sub connect { # normally overridden, but a handy default
|
||||
my($drh, $dbname, $user, $auth, $attrs)= @_;
|
||||
my ($outer, $dbh) = DBI::_new_dbh($drh);
|
||||
$dbh->STORE(Active => 1);
|
||||
$dbh->STORE(AutoCommit => 1);
|
||||
$dbh->STORE( $_ => $attrs->{$_}) for keys %$attrs;
|
||||
return $outer;
|
||||
}
|
||||
|
||||
$DBD::Test::dr::imp_data_size = 0;
|
||||
cmp_ok($DBD::Test::dr::imp_data_size, '==', 0, '... check DBD::Test::dr::imp_data_size to avoid typo');
|
||||
}
|
||||
|
||||
## Test db package
|
||||
{
|
||||
package DBD::Test::db;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
use Test::More;
|
||||
|
||||
$DBD::Test::db::imp_data_size = 0;
|
||||
cmp_ok($DBD::Test::db::imp_data_size, '==', 0, '... check DBD::Test::db::imp_data_size to avoid typo');
|
||||
|
||||
sub STORE {
|
||||
my ($dbh, $attrib, $value) = @_;
|
||||
# would normally validate and only store known attributes
|
||||
# else pass up to DBI to handle
|
||||
if ($attrib eq 'AutoCommit') {
|
||||
# convert AutoCommit values to magic ones to let DBI
|
||||
# know that the driver has 'handled' the AutoCommit attribute
|
||||
$value = ($value) ? -901 : -900;
|
||||
}
|
||||
return $dbh->{$attrib} = $value if $attrib =~ /^examplep_/;
|
||||
return $dbh->SUPER::STORE($attrib, $value);
|
||||
}
|
||||
|
||||
sub DESTROY {
|
||||
if ($expect_active < 0) { # inside child
|
||||
my $self = shift;
|
||||
exit ($self->FETCH('Active') || 0) unless $^O eq 'MSWin32';
|
||||
|
||||
# On Win32, the forked child is actually a thread. So don't exit,
|
||||
# and report failure directly.
|
||||
fail 'Child should be inactive on DESTROY' if $self->FETCH('Active');
|
||||
} else {
|
||||
return $expect_active
|
||||
? ok( shift->FETCH('Active'), 'Should be active in DESTROY')
|
||||
: ok( !shift->FETCH('Active'), 'Should not be active in DESTROY');
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
my $dsn = 'dbi:ExampleP:dummy';
|
||||
|
||||
$INC{'DBD/Test.pm'} = 'dummy'; # required to fool DBI->install_driver()
|
||||
ok my $drh = DBI->install_driver('Test'), 'Install test driver';
|
||||
|
||||
NOSETTING: {
|
||||
# Try defaults.
|
||||
ok my $dbh = $drh->connect, 'Connect to test driver';
|
||||
ok $dbh->{Active}, 'Should start active';
|
||||
$expect_active = 1;
|
||||
}
|
||||
|
||||
IAD: {
|
||||
# Try InactiveDestroy.
|
||||
ok my $dbh = $drh->connect($dsn, '', '', { InactiveDestroy => 1 }),
|
||||
'Create with InactiveDestroy';
|
||||
ok $dbh->{InactiveDestroy}, 'InactiveDestroy should be set';
|
||||
ok $dbh->{Active}, 'Should start active';
|
||||
$expect_active = 0;
|
||||
}
|
||||
|
||||
AIAD: {
|
||||
# Try AutoInactiveDestroy.
|
||||
ok my $dbh = $drh->connect($dsn, '', '', { AutoInactiveDestroy => 1 }),
|
||||
'Create with AutoInactiveDestroy';
|
||||
ok $dbh->{AutoInactiveDestroy}, 'AutoInactiveDestroy should be set';
|
||||
ok $dbh->{Active}, 'Should start active';
|
||||
$expect_active = 1;
|
||||
}
|
||||
|
||||
FORK: {
|
||||
# Try AutoInactiveDestroy and fork.
|
||||
ok my $dbh = $drh->connect($dsn, '', '', { AutoInactiveDestroy => 1 }),
|
||||
'Create with AutoInactiveDestroy again';
|
||||
ok $dbh->{AutoInactiveDestroy}, 'AutoInactiveDestroy should be set';
|
||||
ok $dbh->{Active}, 'Should start active';
|
||||
|
||||
my $pid = eval { fork() };
|
||||
if (not defined $pid) {
|
||||
chomp $@;
|
||||
my $msg = "AutoInactiveDestroy destroy test skipped";
|
||||
diag "$msg because $@\n";
|
||||
pass $msg; # in lieu of the child status test
|
||||
}
|
||||
elsif ($pid) {
|
||||
# parent.
|
||||
$expect_active = 1;
|
||||
wait;
|
||||
ok $? == 0, 'Child should be inactive on DESTROY';
|
||||
} else {
|
||||
# child.
|
||||
$expect_active = -1;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,205 @@
|
|||
#!perl -w
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use DBI;
|
||||
use Test::More;
|
||||
|
||||
my $skip_error;
|
||||
my $skip_warn;
|
||||
my $handled_errstr;
|
||||
sub error_sub {
|
||||
my ($errstr, $dbh, $ret) = @_;
|
||||
$handled_errstr = $errstr;
|
||||
$handled_errstr =~ s/.* set_err (?:failed|warning): //;
|
||||
return $ret unless ($skip_error and $errstr =~ / set_err failed: /) or ($skip_warn and $errstr =~ / set_err warning:/);
|
||||
$dbh->set_err(undef, undef);
|
||||
return 1;
|
||||
}
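# Note: when a HandleError callback returns true, DBI treats the error (or
# warning) as handled and skips the usual RaiseError/RaiseWarn behaviour.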
|
||||
|
||||
my $dbh = DBI->connect('dbi:ExampleP:.', undef, undef, { PrintError => 0, RaiseError => 0, PrintWarn => 0, RaiseWarn => 0, HandleError => \&error_sub });
|
||||
|
||||
sub clear_err {
|
||||
$dbh->set_err(undef, undef);
|
||||
$handled_errstr = undef;
|
||||
}
|
||||
|
||||
###
|
||||
|
||||
ok eval { $dbh->set_err('', 'string 1'); 1 } or diag($@);
|
||||
is $dbh->err, '';
|
||||
is $dbh->errstr, 'string 1';
|
||||
is $handled_errstr, undef;
|
||||
clear_err;
|
||||
|
||||
ok eval { $dbh->set_err(0, 'string 2'); 1 } or diag($@);
|
||||
is $dbh->err, 0;
|
||||
is $dbh->errstr, 'string 2';
|
||||
is $handled_errstr, undef;
|
||||
clear_err;
|
||||
|
||||
ok eval { $dbh->set_err(1, 'string 3'); 1 } or diag($@);
|
||||
is $dbh->err, 1;
|
||||
is $dbh->errstr, 'string 3';
|
||||
is $handled_errstr, 'string 3';
|
||||
clear_err;
|
||||
|
||||
###
|
||||
|
||||
$dbh->{RaiseError} = 1;
|
||||
|
||||
ok eval { $dbh->set_err('', 'string 4'); 1 } or diag($@);
|
||||
is $dbh->err, '';
|
||||
is $dbh->errstr, 'string 4';
|
||||
is $handled_errstr, undef;
|
||||
clear_err;
|
||||
|
||||
ok eval { $dbh->set_err(0, 'string 5'); 1 } or diag($@);
|
||||
is $dbh->err, 0;
|
||||
is $dbh->errstr, 'string 5';
|
||||
is $handled_errstr, undef;
|
||||
clear_err;
|
||||
|
||||
ok !eval { $dbh->set_err(1, 'string 6'); 1 };
|
||||
is $dbh->err, 1;
|
||||
is $dbh->errstr, 'string 6';
|
||||
is $handled_errstr, 'string 6';
|
||||
clear_err;
|
||||
|
||||
$dbh->{RaiseError} = 0;
|
||||
|
||||
###
|
||||
|
||||
$dbh->{RaiseWarn} = 1;
|
||||
|
||||
ok eval { $dbh->set_err('', 'string 7'); 1 } or diag($@);
|
||||
is $dbh->err, '';
|
||||
is $dbh->errstr, 'string 7';
|
||||
is $handled_errstr, undef;
|
||||
clear_err;
|
||||
|
||||
ok !eval { $dbh->set_err(0, 'string 8'); 1 };
|
||||
is $dbh->err, 0;
|
||||
is $dbh->errstr, 'string 8';
|
||||
is $handled_errstr, 'string 8';
|
||||
clear_err;
|
||||
|
||||
ok eval { $dbh->set_err(1, 'string 9'); 1 } or diag($@);
|
||||
is $dbh->err, 1;
|
||||
is $dbh->errstr, 'string 9';
|
||||
is $handled_errstr, 'string 9';
|
||||
clear_err;
|
||||
|
||||
$dbh->{RaiseWarn} = 0;
|
||||
|
||||
###
|
||||
|
||||
$dbh->{RaiseError} = 1;
|
||||
$dbh->{RaiseWarn} = 1;
|
||||
|
||||
ok eval { $dbh->set_err('', 'string 10'); 1 } or diag($@);
|
||||
is $dbh->err, '';
|
||||
is $dbh->errstr, 'string 10';
|
||||
is $handled_errstr, undef;
|
||||
clear_err;
|
||||
|
||||
ok !eval { $dbh->set_err(0, 'string 11'); 1 };
|
||||
is $dbh->err, 0;
|
||||
is $dbh->errstr, 'string 11';
|
||||
is $handled_errstr, 'string 11';
|
||||
clear_err;
|
||||
|
||||
ok !eval { $dbh->set_err(1, 'string 12'); 1 };
|
||||
is $dbh->err, 1;
|
||||
is $dbh->errstr, 'string 12';
|
||||
is $handled_errstr, 'string 12';
|
||||
clear_err;
|
||||
|
||||
$dbh->{RaiseError} = 0;
|
||||
$dbh->{RaiseWarn} = 0;
|
||||
|
||||
###
|
||||
|
||||
$dbh->{RaiseError} = 1;
|
||||
$skip_error = 1;
|
||||
|
||||
ok eval { $dbh->set_err('', 'string 13'); 1 } or diag($@);
|
||||
is $dbh->err, '';
|
||||
is $dbh->errstr, 'string 13';
|
||||
is $handled_errstr, undef;
|
||||
clear_err;
|
||||
|
||||
ok eval { $dbh->set_err(0, 'string 14'); 1 } or diag($@);
|
||||
is $dbh->err, 0;
|
||||
is $dbh->errstr, 'string 14';
|
||||
is $handled_errstr, undef;
|
||||
clear_err;
|
||||
|
||||
ok eval { $dbh->set_err(1, 'string 15'); 1 } or diag($@);
|
||||
is $dbh->err, undef;
|
||||
is $dbh->errstr, undef;
|
||||
is $handled_errstr, 'string 15';
|
||||
clear_err;
|
||||
|
||||
$dbh->{RaiseError} = 0;
|
||||
$skip_error = 0;
|
||||
|
||||
###
|
||||
|
||||
$dbh->{RaiseWarn} = 1;
|
||||
$skip_warn = 1;
|
||||
|
||||
ok eval { $dbh->set_err('', 'string 16'); 1 } or diag($@);
|
||||
is $dbh->err, '';
|
||||
is $dbh->errstr, 'string 16';
|
||||
is $handled_errstr, undef;
|
||||
clear_err;
|
||||
|
||||
ok eval { $dbh->set_err(0, 'string 17'); 1 } or diag($@);
|
||||
is $dbh->err, undef;
|
||||
is $dbh->errstr, undef;
|
||||
is $handled_errstr, 'string 17';
|
||||
clear_err;
|
||||
|
||||
ok eval { $dbh->set_err(1, 'string 18'); 1 } or diag($@);
|
||||
is $dbh->err, 1;
|
||||
is $dbh->errstr, 'string 18';
|
||||
is $handled_errstr, 'string 18';
|
||||
clear_err;
|
||||
|
||||
$dbh->{RaiseWarn} = 0;
|
||||
$skip_error = 0;
|
||||
|
||||
###
|
||||
|
||||
$dbh->{RaiseError} = 1;
|
||||
$dbh->{RaiseWarn} = 1;
|
||||
$skip_error = 1;
|
||||
$skip_warn = 1;
|
||||
|
||||
ok eval { $dbh->set_err('', 'string 19'); 1 } or diag($@);
|
||||
is $dbh->err, '';
|
||||
is $dbh->errstr, 'string 19';
|
||||
is $handled_errstr, undef;
|
||||
clear_err;
|
||||
|
||||
ok eval { $dbh->set_err(0, 'string 20'); 1 } or diag($@);
|
||||
is $dbh->err, undef;
|
||||
is $dbh->errstr, undef;
|
||||
is $handled_errstr, 'string 20';
|
||||
clear_err;
|
||||
|
||||
ok eval { $dbh->set_err(1, 'string 21'); 1 } or diag($@);
|
||||
is $dbh->err, undef;
|
||||
is $dbh->errstr, undef;
|
||||
is $handled_errstr, 'string 21';
|
||||
clear_err;
|
||||
|
||||
$dbh->{RaiseError} = 0;
|
||||
$dbh->{RaiseWarn} = 0;
|
||||
$skip_error = 0;
|
||||
|
||||
###
|
||||
|
||||
done_testing;
|
|
@ -0,0 +1,301 @@
|
|||
#!perl -w
|
||||
# vim:sw=4:ts=8
|
||||
|
||||
use strict;
|
||||
|
||||
use Test::More tests => 27;
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
## 09trace.t
|
||||
## ----------------------------------------------------------------------------
|
||||
#
|
||||
## ----------------------------------------------------------------------------
|
||||
|
||||
BEGIN {
|
||||
use_ok( 'DBI' );
|
||||
}
|
||||
|
||||
$|=1;
|
||||
|
||||
our $fancylogfn = "fancylog$$.log";
|
||||
our $trace_file = "dbitrace$$.log";
|
||||
|
||||
# Clean up when we're done.
|
||||
END { 1 while unlink $fancylogfn;
|
||||
1 while unlink $trace_file; };
|
||||
|
||||
package PerlIO::via::TraceDBI;
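# A minimal PerlIO "via" layer that simply remembers the last line written,
# so the tests below can read back whatever DBI last traced.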
|
||||
|
||||
our $logline;
|
||||
|
||||
sub OPEN {
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub PUSHED
|
||||
{
|
||||
my ($class,$mode,$fh) = @_;
|
||||
# When writing we buffer the data
|
||||
my $buf = '';
|
||||
return bless \$buf,$class;
|
||||
}
|
||||
|
||||
sub FILL
|
||||
{
|
||||
my ($obj,$fh) = @_;
|
||||
return $logline;
|
||||
}
|
||||
|
||||
sub READLINE
|
||||
{
|
||||
my ($obj,$fh) = @_;
|
||||
return $logline;
|
||||
}
|
||||
|
||||
sub WRITE
|
||||
{
|
||||
my ($obj,$buf,$fh) = @_;
|
||||
# print "\n*** WRITING $buf\n";
|
||||
$logline = $buf;
|
||||
return length($buf);
|
||||
}
|
||||
|
||||
sub FLUSH
|
||||
{
|
||||
my ($obj,$fh) = @_;
|
||||
return 0;
|
||||
}
|
||||
|
||||
sub CLOSE {
|
||||
# print "\n*** CLOSING!!!\n";
|
||||
$logline = "**** CERRADO! ***";
|
||||
return -1;
|
||||
}
|
||||
|
||||
1;
|
||||
|
||||
package PerlIO::via::MyFancyLogLayer;
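# A PerlIO "via" layer that forwards everything written to an arbitrary
# logger object (MyFancyLogger, defined below) instead of a file.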
|
||||
|
||||
sub OPEN {
|
||||
my ($obj, $path, $mode, $fh) = @_;
|
||||
$$obj = $path;
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub PUSHED
|
||||
{
|
||||
my ($class,$mode,$fh) = @_;
|
||||
# When writing we buffer the data
|
||||
my $logger;
|
||||
return bless \$logger,$class;
|
||||
}
|
||||
|
||||
sub WRITE
|
||||
{
|
||||
my ($obj,$buf,$fh) = @_;
|
||||
$$obj->log($buf);
|
||||
return length($buf);
|
||||
}
|
||||
|
||||
sub FLUSH
|
||||
{
|
||||
my ($obj,$fh) = @_;
|
||||
return 0;
|
||||
}
|
||||
|
||||
sub CLOSE {
|
||||
my $self = shift;
|
||||
$$self->close();
|
||||
return 0;
|
||||
}
|
||||
|
||||
1;
|
||||
|
||||
package MyFancyLogger;
|
||||
|
||||
use Symbol qw(gensym);
|
||||
|
||||
sub new
|
||||
{
|
||||
my $self = {};
|
||||
my $fh = gensym();
|
||||
open $fh, '>', $fancylogfn;
|
||||
$self->{_fh} = $fh;
|
||||
$self->{_buf} = '';
|
||||
return bless $self, shift;
|
||||
}
|
||||
|
||||
sub log
|
||||
{
|
||||
my $self = shift;
|
||||
my $fh = $self->{_fh};
|
||||
$self->{_buf} .= shift;
|
||||
print $fh "At ", scalar localtime(), ':', $self->{_buf}, "\n" and
|
||||
$self->{_buf} = ''
|
||||
if $self->{_buf}=~tr/\n//;
|
||||
}
|
||||
|
||||
sub close {
|
||||
my $self = shift;
|
||||
return unless exists $self->{_fh};
|
||||
my $fh = $self->{_fh};
|
||||
print $fh "At ", scalar localtime(), ':', $self->{_buf}, "\n" and
|
||||
$self->{_buf} = ''
|
||||
if $self->{_buf};
|
||||
close $fh;
|
||||
delete $self->{_fh};
|
||||
}
|
||||
|
||||
1;
|
||||
|
||||
package main;
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
# Connect to the example driver.
|
||||
|
||||
my $dbh = DBI->connect('dbi:ExampleP:dummy', '', '',
|
||||
{ PrintError => 0,
|
||||
RaiseError => 1,
|
||||
PrintWarn => 0,
|
||||
RaiseWarn => 1,
|
||||
});
|
||||
isa_ok( $dbh, 'DBI::db' );
|
||||
|
||||
# Clean up when we're done.
|
||||
END { $dbh->disconnect if $dbh };
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
# Check the database handle attributes.
|
||||
|
||||
cmp_ok($dbh->{TraceLevel}, '==', $DBI::dbi_debug & 0xF, '... checking TraceLevel attribute');
|
||||
|
||||
1 while unlink $trace_file;
|
||||
|
||||
my $tracefd;
|
||||
## ----------------------------------------------------------------------------
|
||||
# First use regular filehandle
|
||||
open $tracefd, '>>', $trace_file;
|
||||
|
||||
my $oldfd = select($tracefd);
|
||||
$| = 1;
|
||||
select $oldfd;
|
||||
|
||||
ok(-f $trace_file, '... regular fh: trace file successfully created');
|
||||
|
||||
$dbh->trace(2, $tracefd);
|
||||
ok( 1, '... regular fh: filehandle successfully set');
|
||||
|
||||
#
|
||||
# read current size of file
|
||||
#
|
||||
my $filesz = (stat $tracefd)[7];
|
||||
$dbh->trace_msg("First logline\n", 1);
|
||||
#
|
||||
# read the new file size and verify it's different
|
||||
#
|
||||
my $newfsz = (stat $tracefd)[7];
|
||||
SKIP: {
|
||||
skip 'on VMS autoflush using select does not work', 1 if $^O eq 'VMS';
|
||||
ok(($filesz != $newfsz), '... regular fh: trace_msg');
|
||||
}
|
||||
|
||||
$dbh->trace(undef, "STDOUT"); # close $trace_file
|
||||
ok(-f $trace_file, '... regular fh: file successfully changed');
|
||||
|
||||
$filesz = (stat $tracefd)[7];
|
||||
$dbh->trace_msg("Next logline\n");
|
||||
#
|
||||
# read the new file size and verify it's the same
|
||||
#
|
||||
$newfsz = (stat $tracefd)[7];
|
||||
ok(($filesz == $newfsz), '... regular fh: trace_msg after changing trace output');
|
||||
|
||||
#1 while unlink $trace_file;
|
||||
|
||||
$dbh->trace(0); # disable trace
|
||||
|
||||
{ # Open trace to glob. started failing in perl-5.10
|
||||
my $tf = "foo.log.$$";
|
||||
1 while unlink $tf;
|
||||
1 while unlink "*main::FOO";
|
||||
1 while unlink "*main::STDERR";
|
||||
is (-f $tf, undef, "Tracefile removed");
|
||||
ok (open (FOO, ">", $tf), "Tracefile FOO opened");
|
||||
ok (-f $tf, "Tracefile created");
|
||||
DBI->trace (1, *FOO);
|
||||
is (-f "*main::FOO", undef, "Regression test");
|
||||
DBI->trace_msg ("foo\n", 1);
|
||||
DBI->trace (0, *STDERR);
|
||||
close FOO;
|
||||
open my $fh, "<", $tf;
|
||||
is ((<$fh>)[-1], "foo\n", "Traced message");
|
||||
close $fh;
|
||||
is (-f "*main::STDERR", undef, "Regression test");
|
||||
1 while unlink $tf;
|
||||
}
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
# Then use layered filehandle
|
||||
#
|
||||
open TRACEFD, '+>:via(TraceDBI)', 'layeredtrace.out';
|
||||
print TRACEFD "*** Test our layer\n";
|
||||
my $result = <TRACEFD>;
|
||||
is $result, "*** Test our layer\n", "... layered fh: file is layered: $result\n";
|
||||
|
||||
$dbh->trace(1, \*TRACEFD);
|
||||
ok( 1, '... layered fh: filehandle successfully set');
|
||||
|
||||
$dbh->trace_msg("Layered logline\n", 1);
|
||||
|
||||
$result = <TRACEFD>;
|
||||
is $result, "Layered logline\n", "... layered fh: trace_msg: $result\n";
|
||||
|
||||
$dbh->trace(1, "STDOUT"); # close $trace_file
|
||||
$result = <TRACEFD>;
|
||||
is $result, "Layered logline\n", "... layered fh: close doesn't close: $result\n";
|
||||
|
||||
$dbh->trace_msg("Next logline\n", 1);
|
||||
$result = <TRACEFD>;
|
||||
is $result, "Layered logline\n", "... layered fh: trace_msg after change trace output: $result\n";
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
# Then use scalar filehandle
|
||||
#
|
||||
my $tracestr;
|
||||
open TRACEFD, '+>:scalar', \$tracestr;
|
||||
print TRACEFD "*** Test our layer\n";
|
||||
ok 1, "... scalar trace: file is layered: $tracestr\n";
|
||||
|
||||
$dbh->trace(1, \*TRACEFD);
|
||||
ok 1, '... scalar trace: filehandle successfully set';
|
||||
|
||||
$dbh->trace_msg("Layered logline\n", 1);
|
||||
ok 1, "... scalar trace: $tracestr\n";
|
||||
|
||||
$dbh->trace(1, "STDOUT"); # close $trace_file
|
||||
ok 1, "... scalar trace: close doesn't close: $tracestr\n";
|
||||
|
||||
$dbh->trace_msg("Next logline\n", 1);
|
||||
ok 1, "... scalar trace: after change trace output: $tracestr\n";
|
||||
|
||||
## ----------------------------------------------------------------------------
|
||||
# Then use fancy logger
|
||||
#
|
||||
open my $fh, '>:via(MyFancyLogLayer)', MyFancyLogger->new();
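# Note: the "filename" argument above is the logger object itself; the
# MyFancyLogLayer OPEN method stashes it and WRITE forwards trace output
# to its log() method.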
|
||||
|
||||
$dbh->trace('SQL', $fh);
|
||||
|
||||
$dbh->trace_msg("Layered logline\n", 1);
|
||||
ok 1, "... logger: trace_msg\n";
|
||||
|
||||
$dbh->trace(1, "STDOUT"); # close $trace_file
|
||||
ok 1, "... logger: close doesn't close\n";
|
||||
|
||||
$dbh->trace_msg("Next logline\n", 1);
|
||||
ok 1, "... logger: trace_msg after change trace output\n";
|
||||
|
||||
close $fh;
|
||||
|
||||
1;
|
||||
|
||||
# end
|
|
@ -0,0 +1,32 @@
#!perl -w

use strict;
use Test::More tests => 8;

$|=1;
$^W=1;

BEGIN { use_ok( 'DBI', ':sql_types' ) }
BEGIN { use_ok( 'DBI::DBD::Metadata' ) } # just to check for syntax errors etc

my $dbh = DBI->connect("dbi:ExampleP:.","","", { FetchHashKeyName => 'NAME_lc' })
    or die "Unable to connect to ExampleP driver: $DBI::errstr";

isa_ok($dbh, 'DBI::db');
#$dbh->trace(3);

#use Data::Dumper;
#print Dumper($dbh->type_info_all);
#print Dumper($dbh->type_info);
#print Dumper($dbh->type_info(DBI::SQL_INTEGER));

my @ti = $dbh->type_info;
ok(@ti>0);

is($dbh->type_info(SQL_INTEGER)->{DATA_TYPE}, SQL_INTEGER);
is($dbh->type_info(SQL_INTEGER)->{TYPE_NAME}, 'INTEGER');

is($dbh->type_info(SQL_VARCHAR)->{DATA_TYPE}, SQL_VARCHAR);
is($dbh->type_info(SQL_VARCHAR)->{TYPE_NAME}, 'VARCHAR');

1;
@ -0,0 +1,182 @@
|
|||
#!perl -w
|
||||
|
||||
use strict;
|
||||
|
||||
$|=1;
|
||||
$^W=1;
|
||||
|
||||
my $calls = 0;
|
||||
my %my_methods;
|
||||
|
||||
|
||||
# =================================================
|
||||
# Example code for subclassing the DBI.
|
||||
#
|
||||
# Note that the extra ::db and ::st classes must be set up
|
||||
# as subclasses of the corresponding DBI classes.
|
||||
#
|
||||
# This whole mechanism is new and experimental - it may change!
|
||||
|
||||
package MyDBI;
|
||||
@MyDBI::ISA = qw(DBI);
|
||||
|
||||
# the MyDBI::dr::connect method is NOT called!
|
||||
# you can either override MyDBI::connect()
|
||||
# or use MyDBI::db::connected()
|
||||
|
||||
package MyDBI::db;
|
||||
@MyDBI::db::ISA = qw(DBI::db);
|
||||
|
||||
sub prepare {
|
||||
my($dbh, @args) = @_;
|
||||
++$my_methods{prepare};
|
||||
++$calls;
|
||||
my $sth = $dbh->SUPER::prepare(@args);
|
||||
return $sth;
|
||||
}
|
||||
|
||||
|
||||
package MyDBI::st;
|
||||
@MyDBI::st::ISA = qw(DBI::st);
|
||||
|
||||
sub fetch {
|
||||
my($sth, @args) = @_;
|
||||
++$my_methods{fetch};
|
||||
++$calls;
|
||||
# this is just to trigger (re)STORE on exit to test that the STORE
|
||||
# doesn't clear any error condition
|
||||
local $sth->{Taint} = 0;
|
||||
my $row = $sth->SUPER::fetch(@args);
|
||||
if ($row) {
|
||||
# modify fetched data as an example
|
||||
$row->[1] = lc($row->[1]);
|
||||
|
||||
# also demonstrate calling set_err()
|
||||
return $sth->set_err(1,"Don't be so negative",undef,"fetch")
|
||||
if $row->[0] < 0;
|
||||
# ... and providing alternate results
|
||||
# (although typically one would trap and hide an error from SUPER::fetch)
|
||||
return $sth->set_err(2,"Don't exaggerate",undef, undef, [ 42,"zz",0 ])
|
||||
if $row->[0] > 42;
|
||||
}
|
||||
return $row;
|
||||
}
|
||||
|
||||
|
||||
# =================================================
|
||||
package main;
|
||||
|
||||
use Test::More tests => 43;
|
||||
|
||||
BEGIN {
|
||||
use_ok( 'DBI' );
|
||||
}
|
||||
|
||||
my $tmp;
|
||||
|
||||
#DBI->trace(2);
|
||||
my $dbh = MyDBI->connect("dbi:Sponge:foo","","", {
|
||||
PrintError => 0,
|
||||
RaiseError => 1,
|
||||
CompatMode => 1, # just for clone test
|
||||
});
|
||||
isa_ok($dbh, 'MyDBI::db');
|
||||
is($dbh->{CompatMode}, 1);
|
||||
undef $dbh;
|
||||
|
||||
$dbh = DBI->connect("dbi:Sponge:foo","","", {
|
||||
PrintError => 0,
|
||||
RaiseError => 1,
|
||||
RootClass => "MyDBI",
|
||||
CompatMode => 1, # just for clone test
|
||||
dbi_foo => 1, # just to help debugging clone etc
|
||||
});
|
||||
isa_ok( $dbh, 'MyDBI::db');
|
||||
is($dbh->{CompatMode}, 1);
|
||||
|
||||
#$dbh->trace(5);
|
||||
my $sth = $dbh->prepare("foo",
|
||||
# data for DBD::Sponge to return via fetch
|
||||
{ rows => [
|
||||
[ 40, "AAA", 9 ],
|
||||
[ 41, "BB", 8 ],
|
||||
[ -1, "C", 7 ],
|
||||
[ 49, "DD", 6 ]
|
||||
],
|
||||
}
|
||||
);
|
||||
|
||||
is($calls, 1);
|
||||
isa_ok($sth, 'MyDBI::st');
|
||||
|
||||
my $row = $sth->fetch;
|
||||
is($calls, 2);
|
||||
is($row->[1], "aaa");
|
||||
|
||||
$row = $sth->fetch;
|
||||
is($calls, 3);
|
||||
is($row->[1], "bb");
|
||||
|
||||
is($DBI::err, undef);
|
||||
$row = eval { $sth->fetch };
|
||||
my $eval_err = $@;
|
||||
is(!defined $row, 1);
|
||||
is(substr($eval_err,0,50), "DBD::Sponge::st fetch failed: Don't be so negative");
|
||||
|
||||
#$sth->trace(5);
|
||||
#$sth->{PrintError} = 1;
|
||||
$sth->{RaiseError} = 0;
|
||||
$row = eval { $sth->fetch };
|
||||
isa_ok($row, 'ARRAY');
|
||||
is($row->[0], 42);
|
||||
is($DBI::err, 2);
|
||||
like($DBI::errstr, qr/Don't exaggerate/);
|
||||
is($@ =~ /Don't be so negative/, $@);
|
||||
|
||||
|
||||
my $dbh2 = $dbh->clone;
|
||||
isa_ok( $dbh2, 'MyDBI::db', "Clone A" );
|
||||
is($dbh2 != $dbh, 1);
|
||||
is($dbh2->{CompatMode}, 1);
|
||||
|
||||
my $dbh3 = $dbh->clone({});
|
||||
isa_ok( $dbh3, 'MyDBI::db', 'Clone B' );
|
||||
is($dbh3 != $dbh, 1);
|
||||
is($dbh3 != $dbh2, 1);
|
||||
isa_ok( $dbh3, 'MyDBI::db');
|
||||
is($dbh3->{CompatMode}, 1);
|
||||
|
||||
my $dbh2c = $dbh2->clone;
|
||||
isa_ok( $dbh2c, 'MyDBI::db', "Clone of clone A" );
|
||||
is($dbh2c != $dbh2, 1);
|
||||
is($dbh2c->{CompatMode}, 1);
|
||||
|
||||
my $dbh3c = $dbh3->clone({ CompatMode => 0 });
|
||||
isa_ok( $dbh3c, 'MyDBI::db', 'Clone of clone B' );
|
||||
is((grep { $dbh3c == $_ } $dbh, $dbh2, $dbh3), 0);
|
||||
isa_ok( $dbh3c, 'MyDBI::db');
|
||||
ok(!$dbh3c->{CompatMode});
|
||||
|
||||
$tmp = $dbh->sponge_test_installed_method('foo','bar');
|
||||
isa_ok( $tmp, "ARRAY", "installed method" );
|
||||
is_deeply( $tmp, [qw( foo bar )] );
|
||||
$tmp = eval { $dbh->sponge_test_installed_method() };
|
||||
is(!$tmp, 1);
|
||||
is($dbh->err, 42);
|
||||
is($dbh->errstr, "not enough parameters");
|
||||
|
||||
|
||||
$dbh = eval { DBI->connect("dbi:Sponge:foo","","", {
|
||||
RootClass => 'nonesuch1', PrintError => 0, RaiseError => 0, });
|
||||
};
|
||||
ok( !defined($dbh), "Failed connect #1" );
|
||||
is(substr($@,0,25), "Can't locate nonesuch1.pm");
|
||||
|
||||
$dbh = eval { nonesuch2->connect("dbi:Sponge:foo","","", {
|
||||
PrintError => 0, RaiseError => 0, });
|
||||
};
|
||||
ok( !defined($dbh), "Failed connect #2" );
|
||||
is(substr($@,0,36), q{Can't locate object method "connect"});
|
||||
|
||||
print "@{[ %my_methods ]}\n";
|
||||
1;
|
|
@ -0,0 +1,157 @@
|
|||
#!perl -w
|
||||
#
|
||||
# check that the inner-method lookup cache works
|
||||
# (or rather, check that it doesn't cache things when it shouldn't)
|
||||
|
||||
BEGIN { eval "use threads;" } # Must be first
|
||||
my $use_threads_err = $@;
|
||||
use Config qw(%Config);
|
||||
# With this test code and threads, 5.8.1 has issues with freeing freed
|
||||
# scalars, while 5.8.9 doesn't; I don't know about in-between - DAPM
|
||||
my $has_threads = $Config{useithreads};
|
||||
die $use_threads_err if $has_threads && $use_threads_err;
|
||||
|
||||
|
||||
use strict;
|
||||
|
||||
$|=1;
|
||||
$^W=1;
|
||||
|
||||
|
||||
|
||||
use Test::More tests => 49;
|
||||
|
||||
BEGIN {
|
||||
use_ok( 'DBI' );
|
||||
}
|
||||
|
||||
sub new_handle {
|
||||
my $dbh = DBI->connect("dbi:Sponge:foo","","", {
|
||||
PrintError => 0,
|
||||
RaiseError => 1,
|
||||
});
|
||||
|
||||
my $sth = $dbh->prepare("foo",
|
||||
# data for DBD::Sponge to return via fetch
|
||||
{ rows =>
|
||||
[
|
||||
[ "row0" ],
|
||||
[ "row1" ],
|
||||
[ "row2" ],
|
||||
[ "row3" ],
|
||||
[ "row4" ],
|
||||
[ "row5" ],
|
||||
[ "row6" ],
|
||||
],
|
||||
}
|
||||
);
|
||||
|
||||
return ($dbh, $sth);
|
||||
}
|
||||
|
||||
|
||||
sub Foo::local1 { [ "local1" ] };
|
||||
sub Foo::local2 { [ "local2" ] };
|
||||
|
||||
|
||||
my $fetch_hook;
|
||||
{
|
||||
package Bar;
|
||||
@Bar::ISA = qw(DBD::_::st);
|
||||
sub fetch { &$fetch_hook };
|
||||
}
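# Bar::fetch simply delegates to whatever $fetch_hook currently holds, so the
# tests can swap implementations in via @ISA and check the method-lookup cache.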
|
||||
|
||||
sub run_tests {
|
||||
my ($desc, $dbh, $sth) = @_;
|
||||
my $row = $sth->fetch;
|
||||
is($row->[0], "row0", "$desc row0");
|
||||
|
||||
{
|
||||
# replace CV slot
|
||||
no warnings 'redefine';
|
||||
local *DBD::Sponge::st::fetch = sub { [ "local0" ] };
|
||||
$row = $sth->fetch;
|
||||
is($row->[0], "local0", "$desc local0");
|
||||
}
|
||||
$row = $sth->fetch;
|
||||
is($row->[0], "row1", "$desc row1");
|
||||
|
||||
{
|
||||
# replace GP
|
||||
local *DBD::Sponge::st::fetch = *Foo::local1;
|
||||
$row = $sth->fetch;
|
||||
is($row->[0], "local1", "$desc local1");
|
||||
}
|
||||
$row = $sth->fetch;
|
||||
is($row->[0], "row2", "$desc row2");
|
||||
|
||||
{
|
||||
# replace GV
|
||||
local $DBD::Sponge::st::{fetch} = *Foo::local2;
|
||||
$row = $sth->fetch;
|
||||
is($row->[0], "local2", "$desc local2");
|
||||
}
|
||||
$row = $sth->fetch;
|
||||
is($row->[0], "row3", "$desc row3");
|
||||
|
||||
{
|
||||
# @ISA = NoSuchPackage
|
||||
local $DBD::Sponge::st::{fetch};
|
||||
local @DBD::Sponge::st::ISA = qw(NoSuchPackage);
|
||||
eval { local $SIG{__WARN__} = sub {}; $row = $sth->fetch };
|
||||
like($@, qr/Can't locate DBI object method/, "$desc locate DBI object");
|
||||
}
|
||||
$row = $sth->fetch;
|
||||
is($row->[0], "row4", "$desc row4");
|
||||
|
||||
{
|
||||
# @ISA = Bar
|
||||
$fetch_hook = \&DBD::Sponge::st::fetch;
|
||||
local $DBD::Sponge::st::{fetch};
|
||||
local @DBD::Sponge::st::ISA = qw(Bar);
|
||||
$row = $sth->fetch;
|
||||
is($row->[0], "row5", "$desc row5");
|
||||
$fetch_hook = sub { [ "local3" ] };
|
||||
$row = $sth->fetch;
|
||||
is($row->[0], "local3", "$desc local3");
|
||||
}
|
||||
$row = $sth->fetch;
|
||||
is($row->[0], "row6", "$desc row6");
|
||||
}
|
||||
|
||||
run_tests("plain", new_handle());
|
||||
|
||||
|
||||
SKIP: {
|
||||
skip "no threads / perl < 5.8.9", 12 unless $has_threads;
|
||||
# only enable this when handles are allowed to be shared across threads
|
||||
#{
|
||||
# my @h = new_handle();
|
||||
# threads->new(sub { run_tests("threads", @h) })->join;
|
||||
#}
|
||||
threads->new(sub { run_tests("threads-h", new_handle()) })->join;
|
||||
};
|
||||
|
||||
# using weaken attaches magic to the CV; see whether this interferes
|
||||
# with the cache magic
|
||||
|
||||
use Scalar::Util qw(weaken);
|
||||
my $fetch_ref = \&DBI::st::fetch;
|
||||
weaken $fetch_ref;
|
||||
run_tests("magic", new_handle());
|
||||
|
||||
SKIP: {
|
||||
skip "no threads / perl < 5.8.9", 12 unless $has_threads;
|
||||
|
||||
skip "weaken itself is buggy on 5.8.1 (magic killbackrefs panic "
|
||||
."triggered by threads, fixed in 5.8.2)"
|
||||
, 12 unless $] > 5.008001;
|
||||
# only enable this when handles are allowed to be shared across threads
|
||||
#{
|
||||
# my @h = new_handle();
|
||||
# threads->new(sub { run_tests("threads", @h) })->join;
|
||||
#}
|
||||
threads->new(sub { run_tests("magic threads-h", new_handle()) })->join;
|
||||
};
|
||||
|
||||
1;
|
|
@ -0,0 +1,86 @@
|
|||
#!perl -w
|
||||
$|=1;
|
||||
|
||||
# --- Test DBI support for threads created after the DBI was loaded
|
||||
|
||||
BEGIN { eval "use threads;" } # Must be first
|
||||
my $use_threads_err = $@;
|
||||
|
||||
use strict;
|
||||
use Config qw(%Config);
|
||||
use Test::More;
|
||||
|
||||
BEGIN {
|
||||
if (!$Config{useithreads}) {
|
||||
plan skip_all => "this $^O perl $] not supported for DBI iThreads";
|
||||
}
|
||||
die $use_threads_err if $use_threads_err; # need threads
|
||||
}
|
||||
|
||||
my $threads = 4;
|
||||
plan tests => 4 + 4 * $threads;
|
||||
|
||||
{
|
||||
package threads_sub;
|
||||
use base qw(threads);
|
||||
}
|
||||
|
||||
use_ok('DBI');
|
||||
|
||||
$DBI::PurePerl = $DBI::PurePerl; # just to silence used only once warning
|
||||
$DBI::neat_maxlen = 12345;
|
||||
cmp_ok($DBI::neat_maxlen, '==', 12345, '... assignment of neat_maxlen was successful');
|
||||
|
||||
my @connect_args = ("dbi:ExampleP:", '', '');
|
||||
|
||||
my $dbh_parent = DBI->connect_cached(@connect_args);
|
||||
isa_ok( $dbh_parent, 'DBI::db' );
|
||||
|
||||
# this is our function for the threads to run
|
||||
|
||||
sub testing {
|
||||
cmp_ok($DBI::neat_maxlen, '==', 12345, '... DBI::neat_maxlen still holding its value');
|
||||
|
||||
my $dbh = DBI->connect_cached(@connect_args);
|
||||
isa_ok( $dbh, 'DBI::db' );
|
||||
isnt($dbh, $dbh_parent, '... new $dbh is not the same instance as $dbh_parent');
|
||||
|
||||
SKIP: {
|
||||
# skip seems broken with threads (5.8.3)
|
||||
# skip "Kids attribute not supported under DBI::PurePerl", 1 if $DBI::PurePerl;
|
||||
|
||||
cmp_ok($dbh->{Driver}->{Kids}, '==', 1, '... the Driver has one Kid')
|
||||
unless $DBI::PurePerl && ok(1);
|
||||
}
|
||||
|
||||
# RT #77137: a thread created from a thread was crashing the
|
||||
# interpreter
|
||||
my $subthread = threads->new(sub {});
|
||||
|
||||
# provide a little insurance against thread scheduling issues (hopefully)
|
||||
# http://www.nntp.perl.org/group/perl.cpan.testers/2009/06/msg4369660.html
|
||||
eval { select undef, undef, undef, 0.2 };
|
||||
|
||||
$subthread->join();
|
||||
}
|
||||
|
||||
# load up the threads
|
||||
|
||||
my @thr;
|
||||
push @thr, threads_sub->create( \&testing )
|
||||
or die "thread->create failed ($!)"
|
||||
foreach (1..$threads);
|
||||
|
||||
# join all the threads
|
||||
|
||||
foreach my $thread (@thr) {
|
||||
# provide a little insurance against thread scheduling issues (hopefully)
|
||||
# http://www.nntp.perl.org/group/perl.cpan.testers/2009/06/msg4369660.html
|
||||
eval { select undef, undef, undef, 0.2 };
|
||||
|
||||
$thread->join;
|
||||
}
|
||||
|
||||
pass('... all tests have passed');
|
||||
|
||||
1;
|
|
@ -0,0 +1,485 @@
|
|||
#!perl -w
|
||||
$|=1;
|
||||
|
||||
#
|
||||
# test script for DBI::Profile
|
||||
#
|
||||
|
||||
use strict;
|
||||
|
||||
use Config;
|
||||
use DBI::Profile;
|
||||
use DBI qw(dbi_time);
|
||||
use Data::Dumper;
|
||||
use File::Spec;
|
||||
use Storable qw(dclone);
|
||||
|
||||
use Test::More;
|
||||
|
||||
BEGIN {
|
||||
plan skip_all => "profiling not supported for DBI::PurePerl"
|
||||
if $DBI::PurePerl;
|
||||
|
||||
# tie methods (STORE/FETCH etc) get called different number of times
|
||||
plan skip_all => "test results assume perl >= 5.8.2"
|
||||
if $] <= 5.008001;
|
||||
|
||||
# clock instability on xen systems is a reasonably common cause of failure
|
||||
# http://www.nntp.perl.org/group/perl.cpan.testers/2009/05/msg3828158.html
|
||||
# so we'll skip automated testing on those systems
|
||||
plan skip_all => "skipping profile tests on xen (due to clock instability)"
|
||||
if $Config{osvers} =~ /xen/ # eg 2.6.18-4-xen-amd64
|
||||
and $ENV{AUTOMATED_TESTING};
|
||||
|
||||
plan tests => 60;
|
||||
}
|
||||
|
||||
$Data::Dumper::Indent = 1;
|
||||
$Data::Dumper::Terse = 1;
|
||||
|
||||
# log file to store profile results
|
||||
my $LOG_FILE = "test_output_profile$$.log";
|
||||
my $orig_dbi_debug = $DBI::dbi_debug;
|
||||
DBI->trace($DBI::dbi_debug, $LOG_FILE);
|
||||
END {
|
||||
return if $orig_dbi_debug;
|
||||
1 while unlink $LOG_FILE;
|
||||
}
|
||||
|
||||
|
||||
print "Test enabling the profile\n";
|
||||
|
||||
# make sure profiling starts disabled
|
||||
my $dbh = DBI->connect("dbi:ExampleP:", '', '', { RaiseError=>1 });
|
||||
ok($dbh, 'connect');
|
||||
ok(!$dbh->{Profile} && !$ENV{DBI_PROFILE}, 'Profile and DBI_PROFILE not set');
|
||||
|
||||
|
||||
# can turn it on after the fact using a path number
|
||||
$dbh = DBI->connect("dbi:ExampleP:", '', '', { RaiseError=>1 });
|
||||
$dbh->{Profile} = "4";
|
||||
is_deeply sanitize_tree($dbh->{Profile}), bless {
|
||||
'Path' => [ '!MethodName' ],
|
||||
} => 'DBI::Profile';
|
||||
|
||||
# using a package name
|
||||
$dbh = DBI->connect("dbi:ExampleP:", '', '', { RaiseError=>1 });
|
||||
$dbh->{Profile} = "/DBI::Profile";
|
||||
is_deeply sanitize_tree($dbh->{Profile}), bless {
|
||||
'Path' => [ ],
|
||||
} => 'DBI::Profile';
|
||||
|
||||
# using a combined path and name
|
||||
$dbh = DBI->connect("dbi:ExampleP:", '', '', { RaiseError=>1 });
|
||||
$dbh->{Profile} = "20/DBI::Profile";
|
||||
is_deeply sanitize_tree($dbh->{Profile}), bless {
|
||||
'Path' => [ '!MethodName', '!Caller2' ],
|
||||
} => 'DBI::Profile';
|
||||
|
||||
my $t_file = __FILE__;
|
||||
$dbh->do("set foo=1"); my $line = __LINE__;
|
||||
my $expected_caller = "40profile.t line $line";
|
||||
$expected_caller .= " via ${1}40profile.t line 4"
|
||||
if $0 =~ /(zv\w+_)/;
|
||||
print Dumper($dbh->{Profile});
|
||||
is_deeply sanitize_tree($dbh->{Profile}), bless {
|
||||
'Path' => [ '!MethodName', '!Caller2' ],
|
||||
'Data' => { 'do' => {
|
||||
$expected_caller => [ 1, 0, 0, 0, 0, 0, 0 ]
|
||||
} }
|
||||
} => 'DBI::Profile'
|
||||
or warn Dumper $dbh->{Profile};
|
||||
|
||||
|
||||
# can turn it on at connect
|
||||
$dbh = DBI->connect("dbi:ExampleP:", '', '', { RaiseError=>1, Profile=>6 });
|
||||
is_deeply $dbh->{Profile}{Path}, [ '!Statement', '!MethodName' ];
|
||||
cmp_ok(keys %{ $dbh->{Profile}{Data} }, '==', 1, 'on at connect, 1 key');
|
||||
cmp_ok(keys %{ $dbh->{Profile}{Data}{""} }, '>=', 1, 'on at connect, 1 key'); # at least STORE
|
||||
ok(ref $dbh->{Profile}{Data}{""}{STORE}, 'STORE is ref');
|
||||
|
||||
print "dbi_profile\n";
|
||||
# Try to avoid rounding problem on double precision systems
|
||||
# $got->[5] = '1150962858.01596498'
|
||||
# $expected->[5] = '1150962858.015965'
|
||||
# by treating as a string (because is_deeply stringifies)
|
||||
my $t1 = DBI::dbi_time() . "";
|
||||
my $dummy_statement = "Hi mom";
|
||||
my $dummy_methname = "my_method_name";
|
||||
my $leaf = dbi_profile($dbh, $dummy_statement, $dummy_methname, $t1, $t1 + 1);
|
||||
print Dumper($dbh->{Profile});
|
||||
cmp_ok(keys %{ $dbh->{Profile}{Data} }, '==', 2, 'avoid rounding, 2 keys');
|
||||
cmp_ok(keys %{ $dbh->{Profile}{Data}{$dummy_statement} }, '==', 1,
|
||||
'avoid rounding, 1 dummy statement');
|
||||
is(ref($dbh->{Profile}{Data}{$dummy_statement}{$dummy_methname}), 'ARRAY',
|
||||
'dummy method name is array');
|
||||
|
||||
ok $leaf, "should return ref to leaf node";
|
||||
is ref $leaf, 'ARRAY', "should return ref to leaf node";
|
||||
|
||||
my $mine = $dbh->{Profile}{Data}{$dummy_statement}{$dummy_methname};
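# A profile leaf node holds [ count, total time, first time, shortest,
# longest, time of first sample, time of most recent sample ].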
|
||||
|
||||
is $leaf, $mine, "should return ref to correct leaf node";
|
||||
|
||||
print "@$mine\n";
|
||||
is_deeply $mine, [ 1, 1, 1, 1, 1, $t1, $t1 ];
|
||||
|
||||
my $t2 = DBI::dbi_time() . "";
|
||||
dbi_profile($dbh, $dummy_statement, $dummy_methname, $t2, $t2 + 2);
|
||||
print "@$mine\n";
|
||||
is_deeply $mine, [ 2, 3, 1, 1, 2, $t1, $t2 ];
|
||||
|
||||
|
||||
print "Test collected profile data\n";
|
||||
|
||||
$dbh = DBI->connect("dbi:ExampleP:", '', '', { RaiseError=>1, Profile=>2 });
|
||||
# do a (hopefully) measurable amount of work
|
||||
my $sql = "select mode,size,name from ?";
|
||||
my $sth = $dbh->prepare($sql);
|
||||
for my $loop (1..50) { # enough work for low-res timers or v.fast cpus
|
||||
$sth->execute(".");
|
||||
while ( my $hash = $sth->fetchrow_hashref ) {}
|
||||
}
|
||||
$dbh->do("set foo=1");
|
||||
|
||||
print Dumper($dbh->{Profile});
|
||||
|
||||
# check that the proper key was set in Data
|
||||
my $data = $dbh->{Profile}{Data}{$sql};
|
||||
ok($data, 'profile data');
|
||||
is(ref $data, 'ARRAY', 'ARRAY ref');
|
||||
ok(@$data == 7, '7 elements');
|
||||
ok((grep { defined($_) } @$data) == 7, 'all 7 defined');
|
||||
ok((grep { DBI::looks_like_number($_) } @$data) == 7, 'all 7 numeric');
|
||||
my ($count, $total, $first, $shortest, $longest, $time1, $time2) = @$data;
|
||||
ok($count > 3, 'count > 3');
|
||||
ok($total > $first, ' total > first');
|
||||
ok($total > $longest, 'total > longest') or
|
||||
warn "total $total > longest $longest: failed\n";
|
||||
ok($longest > 0, 'longest > 0') or
|
||||
warn "longest $longest > 0: failed\n"; # XXX theoretically not reliable
|
||||
ok($longest > $shortest, 'longest > shortest');
|
||||
ok($time1 >= $^T, 'time1 later than start time');
|
||||
ok($time2 >= $^T, 'time2 later than start time');
|
||||
ok($time1 <= $time2, 'time1 <= time2');
|
||||
my $next = int(dbi_time()) + 1;
|
||||
ok($next > $time1, 'next > time1') or
|
||||
warn "next $next > first $time1: failed\n";
|
||||
ok($next > $time2, 'next > time2') or
|
||||
warn "next $next > last $time2: failed\n";
|
||||
if ($shortest < 0) {
|
||||
my $sys = "$Config{archname} $Config{osvers}"; # ie sparc-linux 2.4.20-2.3sparcsmp
|
||||
warn <<EOT;
|
||||
Time went backwards at some point during the test on this $sys system!
|
||||
Perhaps you have time sync software (like NTP) that adjusted the clock
|
||||
by more than $shortest seconds during the test.
|
||||
Also some multiprocessor systems, and some virtualization systems can exhibit
|
||||
this kind of clock behaviour. Please retry.
|
||||
EOT
|
||||
# don't treat small negative values as failure
|
||||
$shortest = 0 if $shortest > -0.008;
|
||||
}
|
||||
|
||||
|
||||
my $tmp = sanitize_tree($dbh->{Profile});
|
||||
$tmp->{Data}{$sql}[0] = -1; # make test insensitive to local file count
|
||||
is_deeply $tmp, (bless {
|
||||
'Path' => [ '!Statement' ],
|
||||
'Data' => {
|
||||
'' => [ 6, 0, 0, 0, 0, 0, 0 ],
|
||||
$sql => [ -1, 0, 0, 0, 0, 0, 0 ],
|
||||
'set foo=1' => [ 1, 0, 0, 0, 0, 0, 0 ],
|
||||
}
|
||||
} => 'DBI::Profile'), 'profile';
|
||||
|
||||
print "Test profile format\n";
|
||||
my $output = $dbh->{Profile}->format();
|
||||
print "Profile Output\n$output";
|
||||
|
||||
# check that output was produced in the expected format
|
||||
ok(length $output, 'non zero length');
|
||||
ok($output =~ /^DBI::Profile:/, 'DBI::Profile');
|
||||
ok($output =~ /\((\d+) calls\)/, 'some calls');
|
||||
ok($1 >= $count, 'calls >= count');
|
||||
|
||||
# -----------------------------------------------------------------------------------
|
||||
|
||||
# try statement and method name and reference-to-scalar path
|
||||
my $by_reference = 'foo';
|
||||
$dbh = DBI->connect("dbi:ExampleP:", 'usrnam', '', {
|
||||
RaiseError => 1,
|
||||
Profile => { Path => [ '{Username}', '!Statement', \$by_reference, '!MethodName' ] }
|
||||
});
|
||||
$sql = "select name from .";
|
||||
$sth = $dbh->prepare($sql);
|
||||
$sth->execute();
|
||||
$sth->fetchrow_hashref;
|
||||
$by_reference = 'bar';
|
||||
$sth->finish;
|
||||
undef $sth; # DESTROY
|
||||
|
||||
$tmp = sanitize_tree($dbh->{Profile});
|
||||
ok $tmp->{Data}{usrnam}{""}{foo}{STORE}, 'username stored';
|
||||
$tmp->{Data}{usrnam}{""}{foo} = {};
|
||||
# make test insensitive to the number of local files
|
||||
#warn Dumper($tmp);
|
||||
is_deeply $tmp, bless {
|
||||
'Path' => [ '{Username}', '!Statement', \$by_reference, '!MethodName' ],
|
||||
'Data' => {
|
||||
'' => { # because Profile was enabled by DBI just before Username was set
|
||||
'' => {
|
||||
'foo' => {
|
||||
'STORE' => [ 3, 0, 0, 0, 0, 0, 0 ],
|
||||
}
|
||||
}
|
||||
},
|
||||
'usrnam' => {
|
||||
'' => {
|
||||
'foo' => { },
|
||||
},
|
||||
'select name from .' => {
|
||||
'foo' => {
|
||||
'execute' => [ 1, 0, 0, 0, 0, 0, 0 ],
|
||||
'fetchrow_hashref' => [ 1, 0, 0, 0, 0, 0, 0 ],
|
||||
'prepare' => [ 1, 0, 0, 0, 0, 0, 0 ],
|
||||
},
|
||||
'bar' => {
|
||||
'DESTROY' => [ 1, 0, 0, 0, 0, 0, 0 ],
|
||||
'finish' => [ 1, 0, 0, 0, 0, 0, 0 ],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
} => 'DBI::Profile';
|
||||
|
||||
$tmp = [ $dbh->{Profile}->as_node_path_list() ];
|
||||
is @$tmp, 8, 'should have 8 nodes';
|
||||
sanitize_profile_data_nodes($_->[0]) for @$tmp;
|
||||
#warn Dumper($dbh->{Profile}->{Data});
|
||||
is_deeply $tmp, [
|
||||
[ [ 3, 0, 0, 0, 0, 0, 0 ], '', '', 'foo', 'STORE' ],
|
||||
[ [ 2, 0, 0, 0, 0, 0, 0 ], 'usrnam', '', 'foo', 'STORE' ],
|
||||
[ [ 1, 0, 0, 0, 0, 0, 0 ], 'usrnam', '', 'foo', 'connected' ],
|
||||
[ [ 1, 0, 0, 0, 0, 0, 0 ], 'usrnam', 'select name from .', 'bar', 'DESTROY' ],
|
||||
[ [ 1, 0, 0, 0, 0, 0, 0 ], 'usrnam', 'select name from .', 'bar', 'finish' ],
|
||||
[ [ 1, 0, 0, 0, 0, 0, 0 ], 'usrnam', 'select name from .', 'foo', 'execute' ],
|
||||
[ [ 1, 0, 0, 0, 0, 0, 0 ], 'usrnam', 'select name from .', 'foo', 'fetchrow_hashref' ],
|
||||
[ [ 1, 0, 0, 0, 0, 0, 0 ], 'usrnam', 'select name from .', 'foo', 'prepare' ]
|
||||
];
|
||||
|
||||
|
||||
print "testing '!File', '!Caller' and their variants in Path\n";
|
||||
|
||||
$dbh->{Profile}->{Path} = [ '!File', '!File2', '!Caller', '!Caller2' ];
|
||||
$dbh->{Profile}->{Data} = undef;
|
||||
|
||||
my $file = (File::Spec->splitpath(__FILE__))[2]; # '40profile.t'
|
||||
my ($line1, $line2);
|
||||
sub a_sub {
|
||||
$sth = $dbh->prepare("select name from ."); $line2 = __LINE__;
|
||||
}
|
||||
a_sub(); $line1 = __LINE__;
|
||||
|
||||
$tmp = sanitize_profile_data_nodes($dbh->{Profile}{Data});
|
||||
#warn Dumper($tmp);
|
||||
is_deeply $tmp, {
|
||||
"$file" => {
|
||||
"$file via $file" => {
|
||||
"$file line $line2" => {
|
||||
"$file line $line2 via $file line $line1" => [ 1, 0, 0, 0, 0, 0, 0 ]
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
print "testing '!Time' and variants in Path\n";
|
||||
|
||||
undef $sth;
|
||||
my $factor = 1_000_000;
|
||||
$dbh->{Profile}->{Path} = [ '!Time', "!Time~$factor", '!MethodName' ];
|
||||
$dbh->{Profile}->{Data} = undef;
|
||||
|
||||
# give up a timeslice in the hope that the following few lines
|
||||
# run in well under a second even on slow/overloaded systems
|
||||
$t1 = int(dbi_time())+1; 1 while int(dbi_time()-0.01) < $t1; # spin till just after second starts
|
||||
$t2 = int($t1/$factor)*$factor;
|
||||
|
||||
$sth = $dbh->prepare("select name from .");
|
||||
$tmp = sanitize_profile_data_nodes($dbh->{Profile}{Data});
|
||||
|
||||
# if actual "!Time" recorded is 'close enough' then we'll pass
|
||||
# the test - it's not worth failing just because a system is slow
|
||||
$t1 = (keys %$tmp)[0] if (abs($t1 - (keys %$tmp)[0]) <= 5);
|
||||
|
||||
is_deeply $tmp, {
|
||||
$t1 => { $t2 => { prepare => [ 1, 0, 0, 0, 0, 0, 0 ] }}
|
||||
}, "!Time and !Time~$factor should work"
|
||||
or warn Dumper([$t1, $t2, $tmp]);
|
||||
|
||||
|
||||
print "testing &norm_std_n3 in Path\n";
|
||||
|
||||
$dbh->{Profile} = '&norm_std_n3'; # assign as string to get magic
|
||||
is_deeply $dbh->{Profile}{Path}, [
|
||||
\&DBI::ProfileSubs::norm_std_n3
|
||||
];
|
||||
$dbh->{Profile}->{Data} = undef;
|
||||
$sql = qq{insert into foo20060726 (a,b) values (42,"foo")};
|
||||
dbi_profile( { foo => $dbh, bar => undef }, $sql, 'mymethod', 100000000, 100000002);
|
||||
$tmp = $dbh->{Profile}{Data};
|
||||
#warn Dumper($tmp);
|
||||
is_deeply $tmp, {
|
||||
'insert into foo<N> (a,b) values (<N>,"<S>")' => [ 1, '2', '2', '2', '2', '100000000', '100000000' ]
|
||||
}, '&norm_std_n3 should normalize statement';
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------------
|
||||
|
||||
print "testing code ref in Path\n";
|
||||
|
||||
sub run_test1 {
|
||||
my ($profile) = @_;
|
||||
$dbh = DBI->connect("dbi:ExampleP:", 'usrnam', '', {
|
||||
RaiseError => 1,
|
||||
Profile => $profile,
|
||||
});
|
||||
$sql = "select name from .";
|
||||
$sth = $dbh->prepare($sql);
|
||||
$sth->execute();
|
||||
$sth->fetchrow_hashref;
|
||||
$sth->finish;
|
||||
undef $sth; # DESTROY
|
||||
my $data = sanitize_profile_data_nodes($dbh->{Profile}{Data}, 1);
|
||||
return ($data, $dbh) if wantarray;
|
||||
return $data;
|
||||
}
|
||||
|
||||
$tmp = run_test1( { Path => [ 'foo', sub { 'bar' }, 'baz' ] });
|
||||
is_deeply $tmp, { 'foo' => { 'bar' => { 'baz' => [ 11, 0,0,0,0,0,0 ] } } };
|
||||
|
||||
$tmp = run_test1( { Path => [ 'foo', sub { 'ping','pong' } ] });
|
||||
is_deeply $tmp, { 'foo' => { 'ping' => { 'pong' => [ 11, 0,0,0,0,0,0 ] } } };
|
||||
|
||||
$tmp = run_test1( { Path => [ 'foo', sub { \undef } ] });
|
||||
is_deeply $tmp, { 'foo' => undef }, 'should be vetoed';
|
||||
|
||||
# check what code ref sees in $_
|
||||
$tmp = run_test1( { Path => [ sub { $_ } ] });
|
||||
is_deeply $tmp, {
|
||||
'' => [ 6, 0, 0, 0, 0, 0, 0 ],
|
||||
'select name from .' => [ 5, 0, 0, 0, 0, 0, 0 ]
|
||||
}, '$_ should contain statement';
|
||||
|
||||
# check what code ref sees in @_
|
||||
$tmp = run_test1( { Path => [ sub { my ($h,$method) = @_; return \undef if $method =~ /^[A-Z]+$/; return (ref $h, $method) } ] });
|
||||
is_deeply $tmp, {
|
||||
'DBI::db' => {
|
||||
'connected' => [ 1, 0, 0, 0, 0, 0, 0 ],
|
||||
'prepare' => [ 1, 0, 0, 0, 0, 0, 0 ],
|
||||
},
|
||||
'DBI::st' => {
|
||||
'fetchrow_hashref' => [ 1, 0, 0, 0, 0, 0, 0 ],
|
||||
'execute' => [ 1, 0, 0, 0, 0, 0, 0 ],
|
||||
'finish' => [ 1, 0, 0, 0, 0, 0, 0 ],
|
||||
},
|
||||
}, 'should have @_ as keys';
|
||||
|
||||
# check we can filter by method
|
||||
$tmp = run_test1( { Path => [ sub { return \undef unless $_[1] =~ /^fetch/; return $_[1] } ] });
|
||||
#warn Dumper($tmp);
|
||||
is_deeply $tmp, {
|
||||
'fetchrow_hashref' => [ 1, 0, 0, 0, 0, 0, 0 ],
|
||||
}, 'should be able to filter by method';
|
||||
|
||||
DBI->trace(0, "STDOUT"); # close current log to flush it
|
||||
ok(-s $LOG_FILE, 'output should go to log file');
|
||||
|
||||
# -----------------------------------------------------------------------------------
|
||||
|
||||
print "testing as_text\n";
|
||||
|
||||
# check %N$ indices
|
||||
$dbh->{Profile}->{Data} = { P1 => { P2 => [ 100, 400, 42, 43, 44, 45, 46, 47 ] } };
|
||||
my $as_text = $dbh->{Profile}->as_text({
|
||||
path => [ 'top' ],
|
||||
separator => ':',
|
||||
format => '%1$s %2$d [ %10$d %11$d %12$d %13$d %14$d %15$d %16$d %17$d ]',
|
||||
});
|
||||
is($as_text, "top:P1:P2 4 [ 100 400 42 43 44 45 46 47 ]", 'as_text');
|
||||
|
||||
# test sortsub
|
||||
$dbh->{Profile}->{Data} = {
|
||||
A => { Z => [ 101, 1, 2, 3, 4, 5, 6, 7 ] },
|
||||
B => { Y => [ 102, 1, 2, 3, 4, 5, 6, 7 ] },
|
||||
};
|
||||
$as_text = $dbh->{Profile}->as_text({
|
||||
separator => ':',
|
||||
format => '%1$s %10$d ',
|
||||
sortsub => sub { my $ary=shift; @$ary = sort { $a->[2] cmp $b->[2] } @$ary }
|
||||
});
|
||||
is($as_text, "B:Y 102 A:Z 101 ", 'as_text sortsub');
|
||||
|
||||
# general test, including defaults
|
||||
($tmp, $dbh) = run_test1( { Path => [ 'foo', '!MethodName', 'baz' ] });
|
||||
$as_text = $dbh->{Profile}->as_text();
|
||||
$as_text =~ s/\.00+/.0/g;
|
||||
#warn "[$as_text]";
|
||||
is $as_text, q{foo > DESTROY > baz: 0.0s / 1 = 0.0s avg (first 0.0s, min 0.0s, max 0.0s)
|
||||
foo > STORE > baz: 0.0s / 5 = 0.0s avg (first 0.0s, min 0.0s, max 0.0s)
|
||||
foo > connected > baz: 0.0s / 1 = 0.0s avg (first 0.0s, min 0.0s, max 0.0s)
|
||||
foo > execute > baz: 0.0s / 1 = 0.0s avg (first 0.0s, min 0.0s, max 0.0s)
|
||||
foo > fetchrow_hashref > baz: 0.0s / 1 = 0.0s avg (first 0.0s, min 0.0s, max 0.0s)
|
||||
foo > finish > baz: 0.0s / 1 = 0.0s avg (first 0.0s, min 0.0s, max 0.0s)
|
||||
foo > prepare > baz: 0.0s / 1 = 0.0s avg (first 0.0s, min 0.0s, max 0.0s)
|
||||
}, 'as_text general';
|
||||
|
||||
# -----------------------------------------------------------------------------------
|
||||
|
||||
print "dbi_profile_merge_nodes\n";
|
||||
my $total_time = dbi_profile_merge_nodes(
|
||||
my $totals=[],
|
||||
[ 10, 0.51, 0.11, 0.01, 0.22, 1023110000, 1023110010 ],
|
||||
[ 15, 0.42, 0.12, 0.02, 0.23, 1023110005, 1023110009 ],
|
||||
);
|
||||
$_ = sprintf "%.2f", $_ for @$totals; # avoid precision issues
|
||||
is("@$totals", "25.00 0.93 0.11 0.01 0.23 1023110000.00 1023110010.00",
|
||||
'merged nodes');
|
||||
is($total_time, 0.93, 'merged time');
|
||||
|
||||
$total_time = dbi_profile_merge_nodes(
|
||||
$totals=[], {
|
||||
foo => [ 10, 1.51, 0.11, 0.01, 0.22, 1023110000, 1023110010 ],
|
||||
bar => [ 17, 1.42, 0.12, 0.02, 0.23, 1023110005, 1023110009 ],
|
||||
}
|
||||
);
|
||||
$_ = sprintf "%.2f", $_ for @$totals; # avoid precision issues
|
||||
is("@$totals", "27.00 2.93 0.11 0.01 0.23 1023110000.00 1023110010.00",
|
||||
'merged time foo/bar');
|
||||
is($total_time, 2.93, 'merged nodes foo/bar time');
|
||||
|
||||
exit 0;
|
||||
|
||||
|
||||
sub sanitize_tree {
|
||||
my $data = shift;
|
||||
my $skip_clone = shift;
|
||||
return $data unless ref $data;
|
||||
$data = dclone($data) unless $skip_clone;
|
||||
sanitize_profile_data_nodes($data->{Data}) if $data->{Data};
|
||||
return $data;
|
||||
}
|
||||
|
||||
sub sanitize_profile_data_nodes {
|
||||
my $node = shift;
|
||||
if (ref $node eq 'HASH') {
|
||||
sanitize_profile_data_nodes($_) for values %$node;
|
||||
}
|
||||
elsif (ref $node eq 'ARRAY') {
|
||||
if (@$node == 7 and DBI::looks_like_number($node->[0])) {
|
||||
# sanitize the profile data node to simplify tests
|
||||
$_ = 0 for @{$node}[1..@$node-1]; # zero all but element 0 (the call count)
|
||||
}
|
||||
}
|
||||
return $node;
|
||||
}
|