This commit is contained in:
huang 2015-03-06 16:22:07 +08:00
parent 6e7b294452
commit f898e2aa91
48 changed files with 5429 additions and 0 deletions

1
.gitignore vendored
View File

@ -21,3 +21,4 @@
.rbenv-gemsets
.DS_Store
public/api_doc/
/.metadata

View File

@ -0,0 +1,20 @@
<%# Tag display plus an inline "add tag" form for an arbitrary taggable object.
    Locals: obj (the record being tagged), object_flag (type discriminator
    telling the tags controller what kind of object obj is). %>
<div id="tags_show">
<%= render :partial => "tags/tag_name",:locals => {:obj => obj,:non_list_all => false ,:object_flag => object_flag} %>
</div>
<% if User.current.logged? %>
<span> <%= toggle_link ("+ 添加标签"), 'put-tag-form', {:focus => 'tags_name'} %> </span>
<% end %>
<div id="put-tag-form" style="display: none">
<%= form_for "tag_for_save",:remote=>true,:url=>tag_path,
:update => "tags_show",
:complete => '$("#put-tag-form").slideUp();' do |f| %>
<%# Fixed: was :require => true, which emitted an unknown require="true"
    attribute; :required => true produces the HTML5 required attribute so the
    browser blocks submission of an empty tag name. %>
<%= f.text_field :name ,:id => "tags_name",:size=>"20",
:required => true,
:maxlength => Setting.tags_max_length,
:minlength=>Setting.tags_min_length %>
<%# Hidden carriers for the object id/type; values are set explicitly because
    @tag_for_save does not respond to these fields. %>
<%= f.text_field :object_id,:value=> obj.id,:style=>"display:none"%>
<%= f.text_field :object_flag,:value=> object_flag,:style=>"display:none"%>
<a href="#" onclick='$("#tags_name").parent().submit();' type="button" class="submit f_l"></a>
<% end %>
</div>

View File

@ -0,0 +1,13 @@
lib/dalli.rb b1fd9d39df06608fcae5bcf46e9940f95b186d22
lib/dalli/ring.rb e2cd42d8b963e669e2c8a83791fa56ec94f9ec55
lib/dalli/server.rb a42d734f9b3d654886c86f04fb4f0352e7147b1e
lib/dalli/client.rb 11afa0d702c68a151c87ea6e7ccdc863cf03884f
lib/dalli/socket.rb 18b7243332ec2dafa9a17c195944e321b684e67e
lib/dalli/options.rb d7ecb4c52b4ae2b222f319813234297e0951f82a
lib/dalli/version.rb fff3231b7f52d7fa1dabb78bf1f67dcef95c5378
lib/dalli/railtie.rb 63dc0fe85790a10225e867774f2c611d1c1ac46c
lib/dalli/compressor.rb 13b0cf3f607bd8bc9f969679b0b6e9dcb0a059d7
lib/dalli/cas/client.rb 983ded7ec738ed4502658150123e9c5ad7e3faa1
lib/rack/session/dalli.rb 2696ad72e8f9d7f5ceb232db0c8d9a8916192edb
lib/active_support/cache/dalli_store.rb 010d880e0f297d92b26c8f44e446add9d4fedfa2
lib/action_dispatch/middleware/session/dalli_store.rb 62236273ea28a91502871f31aa600e038358931a

Binary file not shown.

Binary file not shown.

Binary file not shown.

12
lib/dalli-2.7.2/Gemfile Normal file
View File

@ -0,0 +1,12 @@
# Development/test dependencies for the dalli gem.
# Dalli itself has no runtime dependencies; anything needed at runtime
# belongs in the gemspec, not here.
source 'https://rubygems.org'
gemspec
gem 'rake'
# kgio: optional faster socket IO on MRI only (native extension).
gem 'kgio', :platform => :mri
# appraisal: runs the test suite against multiple Rails versions (rake test_all).
gem 'appraisal'
# connection_pool: exercised by the :pool_size support in dalli_store.
gem 'connection_pool'
group :test do
gem 'simplecov'
end

412
lib/dalli-2.7.2/History.md Normal file
View File

@ -0,0 +1,412 @@
Dalli Changelog
=====================
2.7.2
==========
- The fix for #423 didn't make it into the released 2.7.1 gem somehow.
2.7.1
==========
- Rack session will check if servers are up on initialization (arthurnn, #423)
- Add support for IPv6 addresses in hex form, ie: "[::1]:11211" (dplummer, #428)
- Add symbol support for namespace (jingkai #431)
- Support expiration intervals longer than 30 days (leonid-shevtsov #436)
2.7.0
==========
- BREAKING CHANGE:
Dalli::Client#add and #replace now return a truthy value, not boolean true or false.
- Multithreading support with dalli\_store:
Use :pool\_size to create a pool of shared, threadsafe Dalli clients in Rails:
```ruby
config.cache_store = :dalli_store, "cache-1.example.com", "cache-2.example.com", :compress => true, :pool_size => 5, :expires_in => 300
```
This will ensure the Rails.cache singleton does not become a source of contention.
**PLEASE NOTE** Rails's :mem\_cache\_store does not support pooling as of
Rails 4.0. You must use :dalli\_store.
- Implement `version` for retrieving version of connected servers [dterei, #384]
- Implement `fetch_multi` for batched read/write [sorentwo, #380]
- Add more support for safe updates with multiple writers: [philipmw, #395]
`require 'dalli/cas/client'` augments Dalli::Client with the following methods:
* Get value with CAS: `[value, cas] = get_cas(key)`
`get_cas(key) {|value, cas| ...}`
* Get multiple values with CAS: `get_multi_cas(k1, k2, ...) {|value, metadata| cas = metadata[:cas]}`
* Set value with CAS: `new_cas = set_cas(key, value, cas, ttl, options)`
* Replace value with CAS: `replace_cas(key, new_value, cas, ttl, options)`
* Delete value with CAS: `delete_cas(key, cas)`
- Fix bug with get key with "Not found" value [uzzz, #375]
2.6.4
=======
- Fix ADD command, aka `write(unless_exist: true)` (pitr, #365)
- Upgrade test suite from mini\_shoulda to minitest.
- Even more performance improvements for get\_multi (xaop, #331)
2.6.3
=======
- Support specific stats by passing `:items` or `:slabs` to `stats` method [bukhamseen]
- Fix 'can't modify frozen String' errors in `ActiveSupport::Cache::DalliStore` [dblock]
- Protect against objects with custom equality checking [theron17]
- Warn if value for key is too large to store [locriani]
2.6.2
=======
- Properly handle missing RubyInline
2.6.1
=======
- Add optional native C binary search for ring, add:
gem 'RubyInline'
to your Gemfile to get a 10% speedup when using many servers.
You will see no improvement if you are only using one server.
- More get_multi performance optimization [xaop, #315]
- Add lambda support for cache namespaces [joshwlewis, #311]
2.6.0
=======
- read_multi optimization, now checks local_cache [chendo, #306]
- Re-implement get_multi to be non-blocking [tmm1, #295]
- Add `dalli` accessor to dalli_store to access the underlying
Dalli::Client, for things like `get_multi`.
- Add `Dalli::GzipCompressor`, primarily for compatibility with nginx's HttpMemcachedModule using `memcached_gzip_flag`
2.5.0
=======
- Don't escape non-ASCII keys, memcached binary protocol doesn't care. [#257]
- :dalli_store now implements LocalCache [#236]
- Removed lots of old session_store test code, tests now all run without a default memcached server [#275]
- Changed Dalli ActiveSupport adapter to always attempt instrumentation [brianmario, #284]
- Change write operations (add/set/replace) to return false when value is too large to store [brianmario, #283]
- Allowing different compressors per client [naseem]
2.4.0
=======
- Added the ability to swap out the compressor used to [de]compress cache data [brianmario, #276]
- Fix get\_multi performance issues with lots of memcached servers [tmm1]
- Throw more specific exceptions [tmm1]
- Allowing different types of serialization per client [naseem]
2.3.0
=======
- Added the ability to swap out the serializer used to [de]serialize cache data [brianmario, #274]
2.2.1
=======
- Fix issues with ENV-based connections. [#266]
- Fix problem with SessionStore in Rails 4.0 [#265]
2.2.0
=======
- Add Rack session with\_lock helper, for Rails 4.0 support [#264]
- Accept connection string in the form of a URL (e.g., memcached://user:pass@hostname:port) [glenngillen]
- Add touch operation [#228, uzzz]
2.1.0
=======
- Add Railtie to auto-configure Dalli when included in Gemfile [#217, steveklabnik]
2.0.5
=======
- Create proper keys for arrays of objects passed as keys [twinturbo, #211]
- Handle long key with namespace [#212]
- Add NODELAY to TCP socket options [#206]
2.0.4
=======
- Dalli no longer needs to be reset after Unicorn/Passenger fork [#208]
- Add option to re-raise errors rescued in the session and cache stores. [pitr, #200]
- DalliStore#fetch called the block if the cached value == false [#205]
- DalliStore should have accessible options [#195]
- Add silence and mute support for DalliStore [#207]
- Tracked down and fixed socket corruption due to Timeout [#146]
2.0.3
=======
- Allow proper retrieval of stored `false` values [laserlemon, #197]
- Allow non-ascii and whitespace keys, only the text protocol has those restrictions [#145]
- Fix DalliStore#delete error-handling [#196]
2.0.2
=======
- Fix all dalli\_store operations to handle nil options [#190]
- Increment and decrement with :initial => nil now return nil (lawrencepit, #112)
2.0.1
=======
- Fix nil option handling in dalli\_store#write [#188]
2.0.0
=======
- Reimplemented the Rails' dalli\_store to remove use of
ActiveSupport::Cache::Entry which added 109 bytes overhead to every
value stored, was a performance bottleneck and duplicated a lot of
functionality already in Dalli. One benchmark went from 4.0 sec to 3.0
sec with the new dalli\_store. [#173]
- Added reset\_stats operation [#155]
- Added support for configuring keepalive on TCP connections to memcached servers (@bianster, #180)
Notes:
* data stored with dalli\_store 2.x is NOT backwards compatible with 1.x.
Upgraders are advised to namespace their keys and roll out the 2.x
upgrade slowly so keys do not clash and caches are warmed.
`config.cache_store = :dalli_store, :expires_in => 24.hours.to_i, :namespace => 'myapp2'`
* data stored with plain Dalli::Client API is unchanged.
* removed support for dalli\_store's race\_condition\_ttl option.
* removed support for em-synchrony and unix socket connection options.
* removed support for Ruby 1.8.6
* removed memcache-client compatibility layer and upgrade documentation.
1.1.5
=======
- Coerce input to incr/decr to integer via #to\_i [#165]
- Convert test suite to minitest/spec (crigor, #166)
- Fix encoding issue with keys [#162]
- Fix double namespacing with Rails and dalli\_store. [#160]
1.1.4
=======
- Use 127.0.0.1 instead of localhost as default to avoid IPv6 issues
- Extend DalliStore's :expires\_in when :race\_condition\_ttl is also used.
- Fix :expires\_in option not propagating from DalliStore to Client, GH-136
- Added support for native Rack session store. Until now, Dalli's
session store has required Rails. Now you can use Dalli to store
sessions for any Rack application.
require 'rack/session/dalli'
use Rack::Session::Dalli, :memcache_server => 'localhost:11211', :compression => true
1.1.3
=======
- Support Rails's autoloading hack for loading sessions with objects
whose classes have not been required yet, GH-129
- Support Unix sockets for connectivity. Shows a 2x performance
increase but keep in mind they only work on localhost. (dfens)
1.1.2
=======
- Fix incompatibility with latest Rack session API when destroying
sessions, thanks @twinge!
1.1.1
=======
v1.1.0 was a bad release. Yanked.
1.1.0
=======
- Remove support for Rails 2.3, add support for Rails 3.1
- Fix socket failure retry logic, now you can restart memcached and Dalli won't complain!
- Add support for fibered operation via em-synchrony (eliaslevy)
- Gracefully handle write timeouts, GH-99
- Only issue bug warning for unexpected StandardErrors, GH-102
- Add travis-ci build support (ryanlecompte)
- Gracefully handle errors in get_multi (michaelfairley)
- Misc fixes from crash2burn, fphilipe, igreg, raggi
1.0.5
=======
- Fix socket failure retry logic, now you can restart memcached and Dalli won't complain!
1.0.4
=======
- Handle non-ASCII key content in dalli_store
- Accept key array for read_multi in dalli_store
- Fix multithreaded race condition in creation of mutex
1.0.3
=======
- Better handling of application marshalling errors
- Work around jruby IO#sysread compatibility issue
1.0.2
=======
- Allow browser session cookies (blindsey)
- Compatibility fixes (mwynholds)
- Add backwards compatibility module for memcache-client, require 'dalli/memcache-client'. It makes
Dalli more compatible with memcache-client and prints out a warning any time you do something that
is no longer supported so you can fix your code.
1.0.1
=======
- Explicitly handle application marshalling bugs, GH-56
- Add support for username/password as options, to allow multiple bucket access
from the same Ruby process, GH-52
- Add support for >1MB values with :value_max_bytes option, GH-54 (r-stu31)
- Add support for default TTL, :expires_in, in Rails 2.3. (Steven Novotny)
config.cache_store = :dalli_store, 'localhost:11211', {:expires_in => 4.hours}
1.0.0
=======
Welcome gucki as a Dalli committer!
- Fix network and namespace issues in get_multi (gucki)
- Better handling of unmarshalling errors (mperham)
0.11.2
=======
- Major reworking of socket error and failover handling (gucki)
- Add basic JRuby support (mperham)
0.11.1
======
- Minor fixes, doc updates.
- Add optional support for kgio sockets, gives a 10-15% performance boost.
0.11.0
======
Warning: this release changes how Dalli marshals data. I do not guarantee compatibility until 1.0 but I will increment the minor version every time a release breaks compatibility until 1.0.
IT IS HIGHLY RECOMMENDED YOU FLUSH YOUR CACHE BEFORE UPGRADING.
- multi() now works reentrantly.
- Added new Dalli::Client option for default TTLs, :expires_in, defaults to 0 (aka forever).
- Added new Dalli::Client option, :compression, to enable auto-compression of values.
- Refactor how Dalli stores data on the server. Values are now tagged
as "marshalled" or "compressed" so they can be automatically deserialized
without the client having to know how they were stored.
0.10.1
======
- Prefer server config from environment, fixes Heroku session store issues (thanks JoshMcKin)
- Better handling of non-ASCII values (size -> bytesize)
- Assert that keys are ASCII only
0.10.0
======
Warning: this release changed how Rails marshals data with Dalli. Unfortunately previous versions double marshalled values. It is possible that data stored with previous versions of Dalli will not work with this version.
IT IS HIGHLY RECOMMENDED YOU FLUSH YOUR CACHE BEFORE UPGRADING.
- Rework how the Rails cache store does value marshalling.
- Rework old server version detection to avoid a socket read hang.
- Refactor the Rails 2.3 :dalli\_store to be closer to :mem\_cache\_store.
- Better documentation for session store config (plukevdh)
0.9.10
----
- Better server retry logic (next2you)
- Rails 3.1 compatibility (gucki)
0.9.9
----
- Add support for *_multi operations for add, set, replace and delete. This implements
pipelined network operations; Dalli disables network replies so we're not limited by
latency, allowing for much higher throughput.
dc = Dalli::Client.new
dc.multi do
dc.set 'a', 1
dc.set 'b', 2
dc.set 'c', 3
dc.delete 'd'
end
- Minor fix to set the continuum sorted by value (kangster)
- Implement session store with Rails 2.3. Update docs.
0.9.8
-----
- Implement namespace support
- Misc fixes
0.9.7
-----
- Small fix for NewRelic integration.
- Detect and fail on older memcached servers (pre-1.4).
0.9.6
-----
- Patches for Rails 3.0.1 integration.
0.9.5
-----
- Major design change - raw support is back to maximize compatibility with Rails
and the increment/decrement operations. You can now pass :raw => true to most methods
to bypass (un)marshalling.
- Support symbols as keys (ddollar)
- Rails 2.3 bug fixes
0.9.4
-----
- Dalli support now in rack-bug (http://github.com/brynary/rack-bug), give it a try!
- Namespace support for Rails 2.3 (bpardee)
- Bug fixes
0.9.3
-----
- Rails 2.3 support (beanieboi)
- Rails SessionStore support
- Passenger integration
- memcache-client upgrade docs, see Upgrade.md
0.9.2
----
- Verify proper operation in Heroku.
0.9.1
----
- Add fetch and cas operations (mperham)
- Add incr and decr operations (mperham)
- Initial support for SASL authentication via the MEMCACHE_{USERNAME,PASSWORD} environment variables, needed for Heroku (mperham)
0.9.0
-----
- Initial gem release.

20
lib/dalli-2.7.2/LICENSE Normal file
View File

@ -0,0 +1,20 @@
Copyright (c) Mike Perham
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@ -0,0 +1,42 @@
Performance
====================
Caching is all about performance, so I carefully track Dalli performance to ensure no regressions.
You can optionally use kgio to give Dalli a 10-20% performance boost: `gem install kgio`.
Note I've added some benchmarks over time to Dalli that the other libraries don't necessarily have.
memcache-client
---------------
Testing 1.8.5 with ruby 1.9.3p0 (2011-10-30 revision 33570) [x86_64-darwin11.2.0]
user system total real
set:plain:memcache-client 1.860000 0.310000 2.170000 ( 2.188030)
set:ruby:memcache-client 1.830000 0.290000 2.120000 ( 2.130212)
get:plain:memcache-client 1.830000 0.340000 2.170000 ( 2.176156)
get:ruby:memcache-client 1.900000 0.330000 2.230000 ( 2.235045)
multiget:ruby:memcache-client 0.860000 0.120000 0.980000 ( 0.987348)
missing:ruby:memcache-client 1.630000 0.320000 1.950000 ( 1.954867)
mixed:ruby:memcache-client 3.690000 0.670000 4.360000 ( 4.364469)
dalli
-----
Testing with Rails 3.2.1
Using kgio socket IO
Testing 2.0.0 with ruby 1.9.3p125 (2012-02-16 revision 34643) [x86_64-darwin11.3.0]
user system total real
mixed:rails:dalli 1.580000 0.570000 2.150000 ( 3.008839)
set:plain:dalli 0.730000 0.300000 1.030000 ( 1.567098)
setq:plain:dalli 0.520000 0.120000 0.640000 ( 0.634402)
set:ruby:dalli 0.800000 0.300000 1.100000 ( 1.640348)
get:plain:dalli 0.840000 0.330000 1.170000 ( 1.668425)
get:ruby:dalli 0.850000 0.330000 1.180000 ( 1.665716)
multiget:ruby:dalli 0.700000 0.260000 0.960000 ( 0.965423)
missing:ruby:dalli 0.720000 0.320000 1.040000 ( 1.511720)
mixed:ruby:dalli 1.660000 0.640000 2.300000 ( 3.320743)
mixedq:ruby:dalli 1.630000 0.510000 2.140000 ( 2.629734)
incr:ruby:dalli 0.270000 0.100000 0.370000 ( 0.547618)

224
lib/dalli-2.7.2/README.md Normal file
View File

@ -0,0 +1,224 @@
Dalli [![Build Status](https://secure.travis-ci.org/mperham/dalli.png)](http://travis-ci.org/mperham/dalli) [![Dependency Status](https://gemnasium.com/mperham/dalli.png)](https://gemnasium.com/mperham/dalli) [![Code Climate](https://codeclimate.com/github/mperham/dalli.png)](https://codeclimate.com/github/mperham/dalli)
=====
Dalli is a high performance pure Ruby client for accessing memcached servers. It works with memcached 1.4+ only as it uses the newer binary protocol. It should be considered a replacement for the memcache-client gem.
The name is a variant of Salvador Dali for his famous painting [The Persistence of Memory](http://en.wikipedia.org/wiki/The_Persistence_of_Memory).
![Persistence of Memory](http://www.virtualdali.com/assets/paintings/31PersistenceOfMemory.jpg)
Dalli's initial development was sponsored by [CouchBase](http://www.couchbase.com/). Many thanks to them!
Design
------------
I decided to write Dalli after maintaining memcache-client for two years for a few specific reasons:
0. The code is mostly old and gross. The bulk of the code is a single 1000 line .rb file.
1. It has a lot of options that are infrequently used which complicate the codebase.
2. The implementation has no single point to attach monitoring hooks.
3. Uses the old text protocol, which hurts raw performance.
So a few notes. Dalli:
0. uses the exact same algorithm to choose a server so existing memcached clusters with TBs of data will work identically to memcache-client.
1. is approximately 20% faster than memcache-client (which itself was heavily optimized) in Ruby 1.9.2.
2. contains explicit "chokepoint" methods which handle all requests; these can be hooked into by monitoring tools (NewRelic, Rack::Bug, etc) to track memcached usage.
3. supports SASL for use in managed environments, e.g. Heroku.
4. provides proper failover with recovery and adjustable timeouts
Supported Ruby versions and implementations
------------------------------------------------
Dalli should work identically on:
* JRuby 1.6+
* Ruby 1.9.3+
* Rubinius 2.0
If you have problems, please enter an issue.
Installation and Usage
------------------------
Remember, Dalli **requires** memcached 1.4+. You can check the version with `memcached -h`. Please note that the memcached that Mac OS X Snow Leopard ships with is 1.2.8 and won't work. Install 1.4.x using Homebrew with
brew install memcached
On Ubuntu you can install it by running:
apt-get install memcached
You can verify your installation using this piece of code:
```bash
gem install dalli
```
```ruby
require 'dalli'
options = { :namespace => "app_v1", :compress => true }
dc = Dalli::Client.new('localhost:11211', options)
dc.set('abc', 123)
value = dc.get('abc')
```
The test suite requires memcached 1.4.3+ with SASL enabled (brew install memcached --enable-sasl ; mv /usr/bin/memcached /usr/bin/memcached.old). Currently only supports the PLAIN mechanism.
Dalli has no runtime dependencies and never will. You can optionally install the 'kgio' gem to
give Dalli a 20-30% performance boost.
Usage with Rails 3.x and 4.x
---------------------------
In your Gemfile:
```ruby
gem 'dalli'
```
In `config/environments/production.rb`:
```ruby
config.cache_store = :dalli_store
```
Here's a more comprehensive example that sets a reasonable default for maximum cache entry lifetime (one day), enables compression for large values and namespaces all entries for this rails app. Remove the namespace if you have multiple apps which share cached values.
```ruby
config.cache_store = :dalli_store, 'cache-1.example.com', 'cache-2.example.com',
{ :namespace => NAME_OF_RAILS_APP, :expires_in => 1.day, :compress => true }
```
If your servers are specified in `ENV["MEMCACHE_SERVERS"]` (e.g. on Heroku when using a third-party hosted addon), simply provide `nil` for the servers:
```ruby
config.cache_store = :dalli_store, nil, { :namespace => NAME_OF_RAILS_APP, :expires_in => 1.day, :compress => true }
```
To use Dalli for Rails session storage that times out after 20 minutes, in `config/initializers/session_store.rb`:
For Rails >= 3.2.4:
```ruby
Rails.application.config.session_store ActionDispatch::Session::CacheStore, :expire_after => 20.minutes
```
For Rails 3.x:
```ruby
require 'action_dispatch/middleware/session/dalli_store'
Rails.application.config.session_store :dalli_store, :memcache_server => ['host1', 'host2'], :namespace => 'sessions', :key => '_foundation_session', :expire_after => 20.minutes
```
Dalli does not support Rails 2.x.
Multithreading and Rails
--------------------------
If you use Puma or another threaded app server, as of Dalli 2.7, you can use a pool
of Dalli clients with Rails to ensure the `Rails.cache` singleton does not become a
source of thread contention. You must add `gem 'connection_pool'` to your Gemfile and
add :pool\_size to your `dalli_store` config:
```ruby
config.cache_store = :dalli_store, 'cache-1.example.com', { :pool_size => 5 }
```
You can then use the Rails cache as normal or check out a Dalli client directly from the pool:
```ruby
Rails.cache.fetch('foo', :expires_in => 300) do
'bar'
end
Rails.cache.dalli.with do |client|
# client is a Dalli::Client instance which you can
# use ONLY within this block
end
```
Configuration
------------------------
Dalli::Client accepts the following options. All times are in seconds.
**expires_in**: Global default for key TTL. Default is 0, which means no expiry.
**failover**: Boolean, if true Dalli will failover to another server if the main server for a key is down.
**compress**: Boolean, if true Dalli will gzip-compress values larger than 1K.
**compression_min_size**: Minimum value byte size for which to attempt compression. Default is 1K.
**compression_max_size**: Maximum value byte size for which to attempt compression. Default is unlimited.
**serializer**: The serializer to use for objects being stored (ex. JSON).
Default is Marshal.
**socket_timeout**: Timeout for all socket operations (connect, read, write). Default is 0.5.
**socket_max_failures**: When a socket operation fails after socket_timeout, the same operation is retried. This is to not immediately mark a server down when there's a very slight network problem. Default is 2.
**socket_failure_delay**: Before retrying a socket operation, the process sleeps for this amount of time. Default is 0.01. Set to nil for no delay.
**down_retry_delay**: When a server has been marked down due to many failures, the server will be checked again for being alive only after this amount of time. Don't set this value too low, otherwise each request which tries the failed server might hang for the maximum **socket_timeout**. Default is 1 second.
**value_max_bytes**: The maximum size of a value in memcached. Defaults to 1MB, this can be increased with memcached's -I parameter. You must also configure Dalli to allow the larger size here.
**username**: The username to use for authenticating this client instance against a SASL-enabled memcached server. Heroku users should not need to use this normally.
**password**: The password to use for authenticating this client instance against a SASL-enabled memcached server. Heroku users should not need to use this normally.
**keepalive**: Boolean. If true, Dalli will enable keep-alive for socket connections. Default is true.
**compressor**: The compressor to use for objects being stored.
Default is zlib, implemented under `Dalli::Compressor`.
If serving compressed data using nginx's HttpMemcachedModule, set `memcached_gzip_flag 2` and use `Dalli::GzipCompressor`
Features and Changes
------------------------
By default, Dalli is thread-safe. Disable thread-safety at your own peril.
Dalli does not need anything special in Unicorn/Passenger since 2.0.4.
It will detect sockets shared with child processes and gracefully reopen the
socket.
Note that Dalli does not require ActiveSupport or Rails. You can safely use it in your own Ruby projects.
[View the Client API](http://www.rubydoc.info/github/mperham/dalli/Dalli/Client)
Helping Out
-------------
If you have a fix you wish to provide, please fork the code, fix in your local project and then send a pull request on github. Please ensure that you include a test which verifies your fix and update History.md with a one sentence description of your fix so you get credit as a contributor.
We're not accepting new compressors. They are trivial to add in an initializer. See #385 (LZ4), #406 (Snappy)
Thanks
------------
Eric Wong - for help using his [kgio](http://unicorn.bogomips.org/kgio/index.html) library.
Brian Mitchell - for his remix-stash project which was helpful when implementing and testing the binary protocol support.
[CouchBase](http://couchbase.com) - for their project sponsorship
Author
----------
Mike Perham, [mikeperham.com](http://mikeperham.com), [@mperham](http://twitter.com/mperham)
Copyright
-----------
Copyright (c) Mike Perham. See LICENSE for details.

42
lib/dalli-2.7.2/Rakefile Normal file
View File

@ -0,0 +1,42 @@
# Rakefile for the dalli gem: test and benchmark tasks, optional metric_fu
# and RDoc integration, and `rake clean` patterns.
require 'appraisal'
require 'rake/testtask'
# Default test task: every test/**/test_*.rb file, with Ruby warnings on.
Rake::TestTask.new(:test) do |test|
test.libs << 'test'
test.pattern = 'test/**/test_*.rb'
test.warning = true
test.verbose = true
end
# Benchmarks live in their own task so they don't slow down normal test runs.
Rake::TestTask.new(:bench) do |test|
test.libs << 'test'
test.pattern = 'test/benchmark_test.rb'
end
# metric_fu is optional tooling; silently skipped when the gem is absent.
begin
require 'metric_fu'
MetricFu::Configuration.run do |config|
config.rcov[:rcov_opts] << "-Itest:lib"
end
rescue LoadError
end
task :default => :test
# Run the suite against two Rails version constraints via RAILS_VERSION.
task :test_all do
system('rake test RAILS_VERSION="~> 3.0.0"')
system('rake test RAILS_VERSION=">= 3.0.0"')
end
# 'gem install rdoc' to upgrade RDoc if this is giving you errors
begin
require 'rdoc/task'
RDoc::Task.new do |rd|
rd.rdoc_files.include("lib/**/*.rb")
end
rescue LoadError
puts "Unable to load rdoc, run 'gem install rdoc' to fix this."
end
# `rake clean` removes Rubinius bytecode files and stray Finder metadata.
require 'rake/clean'
CLEAN.include "**/*.rbc"
CLEAN.include "**/.DS_Store"

View File

@ -0,0 +1,29 @@
# Gem specification for dalli. The version constant is the single source of
# truth and lives in lib/dalli/version.rb.
require './lib/dalli/version'
Gem::Specification.new do |s|
s.name = %q{dalli}
s.version = Dalli::VERSION
s.license = "MIT"
s.authors = ["Mike Perham"]
s.description = %q{High performance memcached client for Ruby}
s.email = %q{mperham@gmail.com}
# Package everything under lib/ plus the top-level docs and build files.
s.files = Dir.glob("lib/**/*") + [
"LICENSE",
"README.md",
"History.md",
"Rakefile",
"Gemfile",
"dalli.gemspec",
"Performance.md",
]
s.homepage = %q{http://github.com/mperham/dalli}
s.rdoc_options = ["--charset=UTF-8"]
s.require_paths = ["lib"]
s.summary = %q{High performance memcached client for Ruby}
s.test_files = Dir.glob("test/**/*")
# Development-only dependencies; dalli declares no runtime dependencies.
s.add_development_dependency(%q<minitest>, [">= 4.2.0"])
s.add_development_dependency(%q<mocha>, [">= 0"])
s.add_development_dependency(%q<rails>, ["~> 4"])
end

View File

@ -0,0 +1,81 @@
require 'active_support/cache'
require 'action_dispatch/middleware/session/abstract_store'
require 'dalli'
# Dalli-based session store for Rails 3.0.
module ActionDispatch
module Session
# Memcached-backed Rails session store built on Dalli (Rails 3.x
# AbstractStore API). Sessions are stored under their session id (sid);
# Dalli errors are logged and swallowed unless :raise_errors is set.
class DalliStore < AbstractStore
def initialize(app, options = {})
# Support old :expires option
options[:expire_after] ||= options[:expires]
super
@default_options = { :namespace => 'rack:session' }.merge(@default_options)
# Use an injected client (options[:cache]) if given, otherwise build a
# Dalli::Client from :memcache_server in the merged default options.
@pool = options[:cache] || begin
Dalli::Client.new(
@default_options[:memcache_server], @default_options)
end
@namespace = @default_options[:namespace]
# Double-bang normalizes any truthy/falsy value to a strict boolean.
@raise_errors = !!@default_options[:raise_errors]
# NOTE(review): super is intentionally called a second time here, as in
# upstream dalli — presumably to re-run AbstractStore setup with the
# merged @default_options. Confirm before changing.
super
end
# Drop all cached connections so they are re-established on next use.
def reset
@pool.reset
end
private
# Returns [sid, session_hash]; generates a fresh sid when none was given.
def get_session(env, sid)
sid = generate_sid unless sid and !sid.empty?
begin
session = @pool.get(sid) || {}
rescue Dalli::DalliError => ex
# re-raise ArgumentError so Rails' session abstract_store.rb can autoload any missing models
raise ArgumentError, ex.message if ex.message =~ /unmarshal/
Rails.logger.warn("Session::DalliStore#get: #{ex.message}")
session = {}
end
[sid, session]
end
# Writes the session; returns the sid on success, false on Dalli failure
# (unless @raise_errors, in which case the error propagates).
def set_session(env, sid, session_data, options = nil)
options ||= env[ENV_SESSION_OPTIONS_KEY]
expiry = options[:expire_after]
@pool.set(sid, session_data, expiry)
sid
rescue Dalli::DalliError
Rails.logger.warn("Session::DalliStore#set: #{$!.message}")
raise if @raise_errors
false
end
# Deletes the session; returns nil when :drop is requested, otherwise a
# freshly generated sid for the replacement session.
def destroy_session(env, session_id, options)
begin
@pool.delete(session_id)
rescue Dalli::DalliError
Rails.logger.warn("Session::DalliStore#destroy_session: #{$!.message}")
raise if @raise_errors
end
return nil if options[:drop]
generate_sid
end
# Legacy destroy hook: deletes the current session, if any.
def destroy(env)
if sid = current_session_id(env)
@pool.delete(sid)
end
rescue Dalli::DalliError
Rails.logger.warn("Session::DalliStore#destroy: #{$!.message}")
raise if @raise_errors
false
end
end
end
end

View File

@ -0,0 +1,363 @@
# encoding: ascii
require 'dalli'
module ActiveSupport
module Cache
class DalliStore
attr_reader :silence, :options
alias_method :silence?, :silence
# Silence the logger.
# Switch logging off for this store (queried via #silence?).
# Returns self so the call can be chained.
def silence!
tap { @silence = true }
end
# Silence the logger within a block.
# Run the given block with logging silenced, then restore the previous
# silence state — even when the block raises. Returns the block's value.
def mute
prior = defined?(@silence) && @silence
@silence = true
yield
ensure
@silence = prior
end
ESCAPE_KEY_CHARS = /[\x00-\x20%\x7F-\xFF]/
# Creates a new DalliStore object, with the given memcached server
# addresses. Each address is either a host name, or a host-with-port string
# in the form of "host_name:port". For example:
#
# ActiveSupport::Cache::DalliStore.new("localhost", "server-downstairs.localnetwork:8229")
#
# If no addresses are specified, then DalliStore will connect to
# localhost port 11211 (the default memcached port).
#
# Connection Pool support
#
# If you are using multithreaded Rails, the Rails.cache singleton can become a source
# of contention. You can use a connection pool of Dalli clients with Rails.cache by
# passing :pool_size and/or :pool_timeout:
#
# config.cache_store = :dalli_store, 'localhost:11211', :pool_size => 10
#
# Both pool options default to 5. You must include the `connection_pool` gem if you
# wish to use pool support.
#
# Builds the store from a flat list of server addresses with an optional
# trailing options hash. With :pool_size/:pool_timeout a ConnectionPool of
# Dalli clients is created; otherwise a single shared client is used.
def initialize(*addresses)
addresses = addresses.flatten
# Trailing hash (if present) becomes the option set; the rest are servers.
options = addresses.extract_options!
@options = options.dup
pool_options = {}
pool_options[:size] = options[:pool_size] if options[:pool_size]
pool_options[:timeout] = options[:pool_timeout] if options[:pool_timeout]
# Accept legacy :compression as an alias for :compress.
@options[:compress] ||= @options[:compression]
addresses.compact!
servers = if addresses.empty?
nil # use the default from Dalli::Client
else
addresses
end
# Pooled clients are created with :threadsafe => false since each checked-out
# client is used by one thread at a time.
if pool_options.empty?
@data = Dalli::Client.new(servers, @options)
else
@data = ::ConnectionPool.new(pool_options) { Dalli::Client.new(servers, @options.merge(:threadsafe => false)) }
end
extend Strategy::LocalCache
end
##
# Access the underlying Dalli::Client or ConnectionPool instance for
# access to get_multi, etc.
def dalli
@data
end
def with(&block)
@data.with(&block)
end
# Fetch a value from the cache. With a block: read the key and, on a
# miss (or when options[:force] is truthy, which skips the read
# entirely), run the block, write its result back under the key, and
# return it. Without a block this behaves exactly like #read.
def fetch(name, options=nil)
  options ||= {}
  name = expanded_key name

  if block_given?
    unless options[:force]
      entry = instrument(:read, name, options) do |payload|
        read_entry(name, options).tap do |result|
          if payload
            payload[:super_operation] = :fetch
            payload[:hit] = !!result
          end
        end
      end
    end

    # NOTE: only nil counts as a miss here — a cached false is a hit.
    if !entry.nil?
      instrument(:fetch_hit, name, options) { |payload| }
      entry
    else
      result = instrument(:generate, name, options) do |payload|
        yield
      end
      write(name, result, options)
      result
    end
  else
    read(name, options)
  end
end
# Read a single key, instrumenting the read and flagging whether it
# was a hit in the notification payload.
def read(name, options=nil)
  opts = options || {}
  key = expanded_key(name)
  instrument(:read, key, opts) do |payload|
    read_entry(key, opts).tap do |entry|
      payload[:hit] = !!entry if payload
    end
  end
end
# Write a single key, checking out a connection (from the pool or the
# bare client) for the duration of the write.
def write(name, value, options=nil)
  opts = options || {}
  key = expanded_key(name)
  instrument(:write, key, opts) do |payload|
    with do |connection|
      write_entry(key, value, opts.merge(:connection => connection))
    end
  end
end
# True when the key resolves to a non-nil entry. A cached false still
# counts as existing.
def exist?(name, options=nil)
  opts = options || {}
  key = expanded_key(name)
  log(:exist, key, opts)
  !read_entry(key, opts).nil?
end
# Remove a single key from memcached, instrumenting the deletion.
def delete(name, options=nil)
  opts = options || {}
  key = expanded_key(name)
  instrument(:delete, key, opts) do |_payload|
    delete_entry(key, opts)
  end
end
# Reads multiple keys from the cache using a single call to the
# servers for all keys. Keys must be Strings.
def read_multi(*names)
  # NOTE(review): the per-call options hash is popped off and its value
  # discarded; every `options` reference below resolves to the
  # store-level attr_reader. Confirm this is intentional before passing
  # per-call options to read_multi.
  names.extract_options!
  mapping = names.inject({}) { |memo, name| memo[expanded_key(name)] = name; memo }
  instrument(:read_multi, names) do
    results = {}
    # Serve whatever we can from the in-process LocalCache first.
    if local_cache
      mapping.keys.each do |key|
        if value = local_cache.read_entry(key, options)
          results[key] = value
        end
      end
    end

    # One network round-trip for everything the local cache missed.
    data = with { |c| c.get_multi(mapping.keys - results.keys) }
    results.merge!(data)

    # Translate expanded keys back to the caller's original keys and
    # backfill the local cache with what came from the servers.
    results.inject({}) do |memo, (inner, _)|
      entry = results[inner]
      # NB Backwards data compatibility, to be removed at some point
      value = (entry.is_a?(ActiveSupport::Cache::Entry) ? entry.value : entry)
      memo[mapping[inner]] = value
      local_cache.write_entry(inner, value, options) if local_cache
      memo
    end
  end
end
# Fetches data from the cache, using the given keys. If there is data in
# the cache with the given keys, then that data is returned. Otherwise,
# the supplied block is called for each key for which there was no data,
# and the result will be written to the cache and returned.
# Read all requested keys with one get_multi; for each key that came
# back nil, run the block and write the result back inside a quiet
# multi block (writes are pipelined). Returns { original_key => value }.
def fetch_multi(*names)
  options = names.extract_options!
  mapping = names.inject({}) { |memo, name| memo[expanded_key(name)] = name; memo }

  instrument(:fetch_multi, names) do
    with do |connection|
      results = connection.get_multi(mapping.keys)

      connection.multi do
        mapping.inject({}) do |memo, (expanded, name)|
          memo[name] = results[expanded]
          # Only nil triggers regeneration — a cached false is kept.
          if memo[name].nil?
            value = yield(name)
            memo[name] = value
            options = options.merge(:connection => connection)
            write_entry(expanded, value, options)
          end

          memo
        end
      end
    end
  end
end
# Increment a cached value. This method uses the memcached incr atomic
# operator and can only be used on values written with the :raw option.
# Calling it on a value not stored with :raw will fail.
# :initial defaults to the amount passed in, as if the counter was initially zero.
# memcached counters cannot hold negative values.
# Atomically increment a raw counter. The :initial option (or the
# amount itself when :initial is absent) seeds the counter when it
# does not exist yet. Returns nil after a swallowed Dalli error
# unless :raise_errors is configured.
def increment(name, amount = 1, options=nil)
  opts = options || {}
  key = expanded_key(name)
  seed = opts.key?(:initial) ? opts[:initial] : amount
  ttl = opts[:expires_in]
  instrument(:increment, key, :amount => amount) do
    with { |client| client.incr(key, amount, ttl, seed) }
  end
rescue Dalli::DalliError => e
  logger.error("DalliError: #{e.message}") if logger
  raise if raise_errors?
  nil
end
# Decrement a cached value. This method uses the memcached decr atomic
# operator and can only be used on values written with the :raw option.
# Calling it on a value not stored with :raw will fail.
# :initial defaults to zero, as if the counter was initially zero.
# memcached counters cannot hold negative values.
# Atomically decrement a raw counter. The :initial option seeds the
# counter when absent (defaulting to zero). Returns nil after a
# swallowed Dalli error unless :raise_errors is configured.
def decrement(name, amount = 1, options=nil)
  opts = options || {}
  key = expanded_key(name)
  seed = opts.key?(:initial) ? opts[:initial] : 0
  ttl = opts[:expires_in]
  instrument(:decrement, key, :amount => amount) do
    with { |client| client.decr(key, amount, ttl, seed) }
  end
rescue Dalli::DalliError => e
  logger.error("DalliError: #{e.message}") if logger
  raise if raise_errors?
  nil
end
# Clear the entire cache on all memcached servers. This method should
# be used with care when using a shared cache.
# Flush every memcached server behind this store. Use with care on a
# shared cache. Returns nil after a swallowed Dalli error.
def clear(options=nil)
  instrument(:clear, 'flushing all keys') do
    with { |client| client.flush_all }
  end
rescue Dalli::DalliError => e
  logger.error("DalliError: #{e.message}") if logger
  raise if raise_errors?
  nil
end
# Clear any local cache
# (no-op here; Strategy::LocalCache overrides it when extended in).
def cleanup(options=nil)
end

# Get the statistics from the memcached servers.
def stats
  with { |c| c.stats }
end

# Drop all server connections; they re-establish on next use.
def reset
  with { |c| c.reset }
end

# Logging delegates to Dalli's process-wide logger.
def logger
  Dalli.logger
end

def logger=(new_logger)
  Dalli.logger = new_logger
end
protected
# Read an entry from the cache.
# Read an entry from the cache.
# Returns nil after a swallowed server error unless :raise_errors is set.
def read_entry(key, options) # :nodoc:
  entry = with { |c| c.get(key, options) }
  # NB Backwards data compatibility, to be removed at some point
  entry.is_a?(ActiveSupport::Cache::Entry) ? entry.value : entry
rescue Dalli::DalliError => e
  logger.error("DalliError: #{e.message}") if logger
  raise if raise_errors?
  nil
end
# Write an entry to the cache.
# Write an entry to the cache.
# :unless_exist maps to memcached ADD (store only when absent);
# otherwise SET is used. Returns false after a swallowed server error
# unless :raise_errors is set.
def write_entry(key, value, options) # :nodoc:
  # cleanup LocalCache
  cleanup if options[:unless_exist]
  method = options[:unless_exist] ? :add : :set
  expires_in = options[:expires_in]
  # NOTE(review): delete(:connection) mutates the caller's options hash;
  # current callers merge a fresh hash first, so this is safe today.
  connection = options.delete(:connection)
  connection.send(method, key, value, expires_in, options)
rescue Dalli::DalliError => e
  logger.error("DalliError: #{e.message}") if logger
  raise if raise_errors?
  false
end
# Delete an entry from the cache.
# Remove a raw key. Returns false after a swallowed server error
# unless :raise_errors is configured.
def delete_entry(key, options) # :nodoc:
  with { |client| client.delete(key) }
rescue Dalli::DalliError => e
  logger.error("DalliError: #{e.message}") if logger
  raise if raise_errors?
  false
end
private
# Expand key to be a consistent string value. Invoke +cache_key+ if
# object responds to +cache_key+. Otherwise, to_param method will be
# called. If the key is a Hash, then keys will be sorted alphabetically.
# Expand key to be a consistent string value. Invoke +cache_key+ if
# object responds to +cache_key+. Otherwise, to_param method will be
# called. If the key is a Hash, then keys will be sorted alphabetically.
def expanded_key(key) # :nodoc:
  return key.cache_key.to_s if key.respond_to?(:cache_key)

  case key
  when Array
    if key.size > 1
      key = key.collect{|element| expanded_key(element)}
    else
      # A one-element array collapses to that element before to_param.
      key = key.first
    end
  when Hash
    key = key.sort_by { |k,_| k.to_s }.collect{|k,v| "#{k}=#{v}"}
  end

  key = key.to_param
  if key.respond_to? :force_encoding
    # Normalize to binary; dup first because force_encoding mutates.
    key = key.dup
    key.force_encoding('binary')
  end
  key
end
# Route an operation through ActiveSupport::Notifications, logging it
# first. The payload hash is yielded so callers may annotate it.
def instrument(operation, key, options=nil)
  log(operation, key, options)
  payload = { :key => key }
  payload = payload.merge(options) if options.is_a?(Hash)
  ActiveSupport::Notifications.instrument("cache_#{operation}.active_support", payload) do
    yield(payload)
  end
end
# Emit a debug line for an operation unless the store is silenced or
# debug logging is off.
def log(operation, key, options=nil)
  return unless logger && logger.debug? && !silence?
  details = options.blank? ? "" : " (#{options.inspect})"
  logger.debug("Cache #{operation}: #{key}#{details}")
end
# Whether the store was configured to re-raise Dalli errors instead of
# swallowing them. Always returns a strict boolean.
def raise_errors?
  @options[:raise_errors] ? true : false
end
end
end
end

View File

@ -0,0 +1,46 @@
require 'dalli/compressor'
require 'dalli/client'
require 'dalli/ring'
require 'dalli/server'
require 'dalli/socket'
require 'dalli/version'
require 'dalli/options'
require 'dalli/railtie' if defined?(::Rails::Railtie)
module Dalli
  # Error hierarchy — every Dalli failure derives from DalliError so
  # callers can rescue a single class.
  # generic error
  class DalliError < RuntimeError; end
  # socket/server communication error
  class NetworkError < DalliError; end
  # no server available/alive error
  class RingError < DalliError; end
  # application error in marshalling serialization
  class MarshalError < DalliError; end
  # application error in marshalling deserialization or decompression
  class UnmarshalError < DalliError; end

  # Process-wide logger: prefers the Rails logger when one is present,
  # otherwise an INFO-level logger writing to STDOUT. Memoized.
  def self.logger
    @logger ||= (rails_logger || default_logger)
  end

  def self.rails_logger
    (defined?(Rails) && Rails.respond_to?(:logger) && Rails.logger) ||
    (defined?(RAILS_DEFAULT_LOGGER) && RAILS_DEFAULT_LOGGER.respond_to?(:debug) && RAILS_DEFAULT_LOGGER)
  end

  def self.default_logger
    require 'logger'
    l = Logger.new(STDOUT)
    l.level = Logger::INFO
    l
  end

  def self.logger=(logger)
    @logger = logger
  end
end

# Fail fast on legacy Rails 2 apps: Dalli requires Rails 3 or newer.
if defined?(RAILS_VERSION) && RAILS_VERSION < '3'
  raise Dalli::DalliError, "Dalli #{Dalli::VERSION} does not support Rails version < 3.0"
end

View File

@ -0,0 +1,58 @@
require 'dalli/client'
module Dalli
  # Reopens Dalli::Client with CAS (check-and-set) variants of the
  # standard operations. A CAS id lets a caller update a key only if it
  # has not changed since it was read.
  class Client
    ##
    # Get the value and CAS ID associated with the key. If a block is provided,
    # value and CAS will be passed to the block.
    def get_cas(key)
      (value, cas) = perform(:cas, key)
      # The server reports a miss as 'Not found'; normalize that to nil.
      value = (!value || value == 'Not found') ? nil : value
      if block_given?
        yield value, cas
      else
        [value, cas]
      end
    end

    ##
    # Fetch multiple keys efficiently, including available metadata such as CAS.
    # If a block is given, yields key/data pairs one at a time. Data is an array:
    #   [value, cas_id]
    # If no block is given, returns a hash of
    #   { 'key' => [value, cas_id] }
    def get_multi_cas(*keys)
      if block_given?
        get_multi_yielder(keys) {|*args| yield(*args)}
      else
        Hash.new.tap do |hash|
          get_multi_yielder(keys) {|k, data| hash[k] = data}
        end
      end
    end

    ##
    # Set the key-value pair, verifying existing CAS.
    # Returns the resulting CAS value if succeeded, and falsy otherwise.
    def set_cas(key, value, cas, ttl=nil, options=nil)
      ttl ||= @options[:expires_in].to_i
      perform(:set, key, value, ttl, cas, options)
    end

    ##
    # Conditionally add a key/value pair, verifying existing CAS, only if the
    # key already exists on the server. Returns the new CAS value if the
    # operation succeeded, or falsy otherwise.
    def replace_cas(key, value, cas, ttl=nil, options=nil)
      ttl ||= @options[:expires_in].to_i
      perform(:replace, key, value, ttl, cas, options)
    end

    # Delete a key/value pair, verifying existing CAS.
    # Returns true if succeeded, and falsy otherwise.
    def delete_cas(key, cas=0)
      perform(:delete, key, cas)
    end
  end
end

View File

@ -0,0 +1,439 @@
require 'digest/md5'
require 'set'
# encoding: ascii
module Dalli
class Client
##
# Dalli::Client is the main class which developers will use to interact with
# the memcached server. Usage:
#
# Dalli::Client.new(['localhost:11211:10', 'cache-2.example.com:11211:5', '192.168.0.1:22122:5'],
# :threadsafe => true, :failover => true, :expires_in => 300)
#
# servers is an Array of "host:port:weight" where weight allows you to distribute cache unevenly.
# Both weight and port are optional. If you pass in nil, Dalli will use the <tt>MEMCACHE_SERVERS</tt>
# environment variable or default to 'localhost:11211' if it is not present.
#
# Options:
# - :namespace - prepend each key with this value to provide simple namespacing.
# - :failover - if a server is down, look for and store values on another server in the ring. Default: true.
# - :threadsafe - ensure that only one thread is actively using a socket at a time. Default: true.
# - :expires_in - default TTL in seconds if you do not pass TTL as a parameter to an individual operation, defaults to 0 or forever
# - :compress - defaults to false, if true Dalli will compress values larger than 1024 bytes before sending them to memcached.
# - :serializer - defaults to Marshal
# - :compressor - defaults to zlib
#
def initialize(servers=nil, options={})
@servers = normalize_servers(servers || ENV["MEMCACHE_SERVERS"] || '127.0.0.1:11211')
@options = normalize_options(options)
@ring = nil
end
#
# The standard memcached instruction set
#
##
# Turn on quiet aka noreply support.
# All relevant operations within this block will be effectively
# pipelined as Dalli will use 'quiet' operations where possible.
# Currently supports the set, add, replace and delete operations.
# Turn on quiet aka noreply support for the duration of the block.
# Relevant operations (set, add, replace, delete) are effectively
# pipelined while the thread-local flag is set. The previous flag
# value is restored even if the block raises.
def multi
  previous = Thread.current[:dalli_multi]
  Thread.current[:dalli_multi] = true
  yield
ensure
  Thread.current[:dalli_multi] = previous
end
##
# Get the value associated with the key.
def get(key, options=nil)
  # NOTE(review): the options parameter is accepted but not forwarded —
  # plain gets currently ignore per-call options.
  perform(:get, key)
end
##
# Fetch multiple keys efficiently.
# If a block is given, yields key/value pairs one at a time.
# Otherwise returns a hash of { 'key' => 'value', 'key2' => 'value1' }
def get_multi(*keys)
if block_given?
get_multi_yielder(keys) {|k, data| yield k, data.first}
else
Hash.new.tap do |hash|
get_multi_yielder(keys) {|k, data| hash[k] = data.first}
end
end
end
# Read the key; on a nil result, generate the value with the block and
# store it via add (so a concurrent writer wins). Returns the cached
# or freshly generated value. A cached false is returned as-is.
def fetch(key, ttl=nil, options=nil)
  ttl ||= @options[:expires_in].to_i
  value = get(key, options)
  return value unless value.nil? && block_given?
  value = yield
  add(key, value, ttl, options)
  value
end
##
# compare and swap values using optimistic locking.
# Fetch the existing value for key.
# If it exists, yield the value to the block.
# Add the block's return value as the new value for the key.
# Add will fail if someone else changed the value.
#
# Returns:
# - nil if the key did not exist.
# - false if the value was changed by someone else.
# - true if the value was successfully updated.
def cas(key, ttl=nil, options=nil, &block)
ttl ||= @options[:expires_in].to_i
(value, cas) = perform(:cas, key)
value = (!value || value == 'Not found') ? nil : value
if value
newvalue = block.call(value)
perform(:set, key, newvalue, ttl, cas, options)
end
end
def set(key, value, ttl=nil, options=nil)
ttl ||= @options[:expires_in].to_i
perform(:set, key, value, ttl, 0, options)
end
##
# Conditionally add a key/value pair, if the key does not already exist
# on the server. Returns truthy if the operation succeeded.
def add(key, value, ttl=nil, options=nil)
ttl ||= @options[:expires_in].to_i
perform(:add, key, value, ttl, options)
end
##
# Conditionally add a key/value pair, only if the key already exists
# on the server. Returns truthy if the operation succeeded.
def replace(key, value, ttl=nil, options=nil)
ttl ||= @options[:expires_in].to_i
perform(:replace, key, value, ttl, 0, options)
end
def delete(key)
perform(:delete, key, 0)
end
##
# Append value to the value already stored on the server for 'key'.
# Appending only works for values stored with :raw => true.
def append(key, value)
perform(:append, key, value.to_s)
end
##
# Prepend value to the value already stored on the server for 'key'.
# Prepending only works for values stored with :raw => true.
def prepend(key, value)
perform(:prepend, key, value.to_s)
end
def flush(delay=0)
time = -delay
ring.servers.map { |s| s.request(:flush, time += delay) }
end
alias_method :flush_all, :flush
##
# Incr adds the given amount to the counter on the memcached server.
# Amt must be a positive integer value.
#
# If default is nil, the counter must already exist or the operation
# will fail and will return nil. Otherwise this method will return
# the new value for the counter.
#
# Note that the ttl will only apply if the counter does not already
# exist. To increase an existing counter and update its TTL, use
# #cas.
def incr(key, amt=1, ttl=nil, default=nil)
raise ArgumentError, "Positive values only: #{amt}" if amt < 0
ttl ||= @options[:expires_in].to_i
perform(:incr, key, amt.to_i, ttl, default)
end
##
# Decr subtracts the given amount from the counter on the memcached server.
# Amt must be a positive integer value.
#
# memcached counters are unsigned and cannot hold negative values. Calling
# decr on a counter which is 0 will just return 0.
#
# If default is nil, the counter must already exist or the operation
# will fail and will return nil. Otherwise this method will return
# the new value for the counter.
#
# Note that the ttl will only apply if the counter does not already
# exist. To decrease an existing counter and update its TTL, use
# #cas.
def decr(key, amt=1, ttl=nil, default=nil)
raise ArgumentError, "Positive values only: #{amt}" if amt < 0
ttl ||= @options[:expires_in].to_i
perform(:decr, key, amt.to_i, ttl, default)
end
##
# Touch updates expiration time for a given key.
#
# Returns true if key exists, otherwise nil.
def touch(key, ttl=nil)
ttl ||= @options[:expires_in].to_i
resp = perform(:touch, key, ttl)
resp.nil? ? nil : true
end
##
# Collect the stats for each server.
# You can optionally pass a type including :items or :slabs to get specific stats
# Returns a hash like { 'hostname:port' => { 'stat1' => 'value1', ... }, 'hostname2:port' => { ... } }
def stats(type=nil)
type = nil if ![nil, :items,:slabs].include? type
values = {}
ring.servers.each do |server|
values["#{server.hostname}:#{server.port}"] = server.alive? ? server.request(:stats,type.to_s) : nil
end
values
end
##
# Reset stats for each server.
def reset_stats
ring.servers.map do |server|
server.alive? ? server.request(:reset_stats) : nil
end
end
##
## Make sure memcache servers are alive, or raise an Dalli::RingError
def alive!
ring.server_for_key("")
end
##
## Version of the memcache servers.
def version
values = {}
ring.servers.each do |server|
values["#{server.hostname}:#{server.port}"] = server.alive? ? server.request(:version) : nil
end
values
end
##
# Close our connection to each server.
# If you perform another operation after this, the connections will be re-established.
def close
if @ring
@ring.servers.each { |s| s.close }
@ring = nil
end
end
alias_method :reset, :close
# Stub method so a bare Dalli client can pretend to be a connection pool.
def with
yield self
end
private
def groups_for_keys(*keys)
groups = mapped_keys(keys).flatten.group_by do |key|
begin
ring.server_for_key(key)
rescue Dalli::RingError
Dalli.logger.debug { "unable to get key #{key}" }
nil
end
end
return groups
end
def mapped_keys(keys)
keys.flatten.map {|a| validate_key(a.to_s)}
end
def make_multi_get_requests(groups)
groups.each do |server, keys_for_server|
begin
# TODO: do this with the perform chokepoint?
# But given the fact that fetching the response doesn't take place
# in that slot it's misleading anyway. Need to move all of this method
# into perform to be meaningful
server.request(:send_multiget, keys_for_server)
rescue DalliError, NetworkError => e
Dalli.logger.debug { e.inspect }
Dalli.logger.debug { "unable to get keys for server #{server.hostname}:#{server.port}" }
end
end
end
# Ask each alive server to begin streaming its multiget responses.
# Servers whose start call fails are removed; dead (not alive?)
# servers are skipped but kept in the list, matching the caller's
# subsequent sock-based cleanup. Returns the (pruned) servers array.
def perform_multi_response_start(servers)
  failed = []
  servers.each do |server|
    next unless server.alive?
    begin
      server.multi_response_start
    rescue DalliError, NetworkError => e
      Dalli.logger.debug { e.inspect }
      Dalli.logger.debug { "results from this server will be missing" }
      # Collect failures instead of calling servers.delete inside the
      # #each — mutating the array mid-iteration skips the element
      # that follows a failed server.
      failed << server
    end
  end
  failed.each { |server| servers.delete(server) }
  servers
end
##
# Normalizes the argument into an array of servers. If the argument is a string, it's expected to be of
# the format "memcache1.example.com:11211[,memcache2.example.com:11211[,memcache3.example.com:11211[...]]]
# Normalize the argument into an array of servers: a comma-separated
# string is split, anything else (already an array, or nil handled by
# the caller) is passed through untouched.
def normalize_servers(servers)
  servers.is_a?(String) ? servers.split(",") : servers
end
def ring
@ring ||= Dalli::Ring.new(
@servers.map do |s|
server_options = {}
if s =~ %r{\Amemcached://}
uri = URI.parse(s)
server_options[:username] = uri.user
server_options[:password] = uri.password
s = "#{uri.host}:#{uri.port}"
end
Dalli::Server.new(s, @options.merge(server_options))
end, @options
)
end
# Chokepoint method for instrumentation
# Chokepoint method for instrumentation
# With a block, just runs it (get_multi_yielder reuses this hook that
# way). Otherwise: stringify and validate the key, route it to the
# owning server on the ring, and issue the operation.
def perform(*all_args, &blk)
  return blk.call if blk

  op, key, *args = *all_args

  key = key.to_s
  key = validate_key(key)
  begin
    server = ring.server_for_key(key)
    ret = server.request(op, key, *args)
    ret
  rescue NetworkError => e
    Dalli.logger.debug { e.inspect }
    Dalli.logger.debug { "retrying request with new server" }
    # No explicit retry cap: this loops until a request succeeds or the
    # ring raises Dalli::RingError once no server is alive.
    retry
  end
end
# Apply the namespace and keep the key within memcached's 250-character
# key limit: over-long keys are truncated and suffixed with an MD5
# digest of the full key so distinct long keys remain distinct.
def validate_key(key)
  raise ArgumentError, "key cannot be blank" if !key || key.length == 0
  key = key_with_namespace(key)
  if key.length > 250
    # 212 leaves room for the ":md5:" marker (5 chars) plus the 32-char
    # hex digest while staying under the limit.
    # NOTE(review): length checks are character-based — multi-byte keys
    # could still exceed 250 bytes; confirm whether that matters here.
    max_length_before_namespace = 212 - (namespace || '').size
    key = "#{key[0, max_length_before_namespace]}:md5:#{Digest::MD5.hexdigest(key)}"
  end
  return key
end
def key_with_namespace(key)
(ns = namespace) ? "#{ns}:#{key}" : key
end
def key_without_namespace(key)
(ns = namespace) ? key.sub(%r(\A#{ns}:), '') : key
end
def namespace
return nil unless @options[:namespace]
@options[:namespace].is_a?(Proc) ? @options[:namespace].call.to_s : @options[:namespace].to_s
end
# Normalize client options in place: map the deprecated :compression
# flag onto :compress (with a warning) and coerce :expires_in to an
# integer, raising ArgumentError when it cannot be converted.
def normalize_options(opts)
  if opts[:compression]
    Dalli.logger.warn "DEPRECATED: Dalli's :compression option is now just :compress => true. Please update your configuration."
    opts[:compress] = opts.delete(:compression)
  end
  if opts[:expires_in]
    begin
      opts[:expires_in] = opts[:expires_in].to_i
    rescue NoMethodError
      raise ArgumentError, "cannot convert :expires_in => #{opts[:expires_in].inspect} to an integer"
    end
  end
  opts
end
##
# Yields, one at a time, keys and their values+attributes.
def get_multi_yielder(keys)
perform do
return {} if keys.empty?
ring.lock do
begin
groups = groups_for_keys(keys)
if unfound_keys = groups.delete(nil)
Dalli.logger.debug { "unable to get keys for #{unfound_keys.length} keys because no matching server was found" }
end
make_multi_get_requests(groups)
servers = groups.keys
return if servers.empty?
servers = perform_multi_response_start(servers)
start = Time.now
loop do
# remove any dead servers
servers.delete_if { |s| s.sock.nil? }
break if servers.empty?
# calculate remaining timeout
elapsed = Time.now - start
timeout = servers.first.options[:socket_timeout]
if elapsed > timeout
readable = nil
else
sockets = servers.map(&:sock)
readable, _ = IO.select(sockets, nil, nil, timeout - elapsed)
end
if readable.nil?
# no response within timeout; abort pending connections
servers.each do |server|
Dalli.logger.debug { "memcached at #{server.name} did not response within timeout" }
server.multi_response_abort
end
break
else
readable.each do |sock|
server = sock.server
begin
server.multi_response_nonblock.each_pair do |key, value_list|
yield key_without_namespace(key), value_list
end
if server.multi_response_completed?
servers.delete(server)
end
rescue NetworkError
servers.delete(server)
end
end
end
end
end
end
end
end
end
end

View File

@ -0,0 +1,29 @@
require 'zlib'
require 'stringio'
module Dalli
# Default value compressor: plain zlib deflate/inflate.
class Compressor
  class << self
    def compress(data)
      Zlib::Deflate.deflate(data)
    end

    def decompress(data)
      Zlib::Inflate.inflate(data)
    end
  end
end
class GzipCompressor
def self.compress(data)
io = StringIO.new("w")
gz = Zlib::GzipWriter.new(io)
gz.write(data)
gz.close
io.string
end
def self.decompress(data)
io = StringIO.new(data, "rb")
Zlib::GzipReader.new(io).read
end
end
end

View File

@ -0,0 +1,64 @@
require 'thread'
require 'monitor'
module Dalli
  # Make Dalli threadsafe by using a lock around all
  # public server methods.
  #
  #   Dalli::Server.extend(Dalli::Threadsafe)
  #
  # Each override delegates to the Server implementation via super,
  # serialized through one reentrant Monitor per server instance.
  module Threadsafe
    def self.extended(obj)
      obj.init_threadsafe
    end

    def request(op, *args)
      @lock.synchronize do
        super
      end
    end

    def alive?
      @lock.synchronize do
        super
      end
    end

    def close
      @lock.synchronize do
        super
      end
    end

    def multi_response_start
      @lock.synchronize do
        super
      end
    end

    def multi_response_nonblock
      @lock.synchronize do
        super
      end
    end

    def multi_response_abort
      @lock.synchronize do
        super
      end
    end

    # Manual lock/unlock pair used by Ring#lock around batched multigets.
    def lock!
      @lock.mon_enter
    end

    def unlock!
      @lock.mon_exit
    end

    # Monitor (not Mutex) so the owning thread may re-enter via lock!.
    def init_threadsafe
      @lock = Monitor.new
    end
  end
end

View File

@ -0,0 +1,7 @@
module Dalli
  # Rails integration: default the application's cache store to
  # :dalli_store before app configuration runs, so the application's
  # own config can still override it.
  class Railtie < ::Rails::Railtie
    config.before_configuration do
      config.cache_store = :dalli_store
    end
  end
end

View File

@ -0,0 +1,142 @@
require 'digest/sha1'
require 'zlib'
module Dalli
class Ring
POINTS_PER_SERVER = 160 # this is the default in libmemcached
attr_accessor :servers, :continuum
def initialize(servers, options)
@servers = servers
@continuum = nil
if servers.size > 1
total_weight = servers.inject(0) { |memo, srv| memo + srv.weight }
continuum = []
servers.each do |server|
entry_count_for(server, servers.size, total_weight).times do |idx|
hash = Digest::SHA1.hexdigest("#{server.hostname}:#{server.port}:#{idx}")
value = Integer("0x#{hash[0..7]}")
continuum << Dalli::Ring::Entry.new(value, server)
end
end
@continuum = continuum.sort { |a, b| a.value <=> b.value }
end
threadsafe! unless options[:threadsafe] == false
@failover = options[:failover] != false
end
def server_for_key(key)
if @continuum
hkey = hash_for(key)
20.times do |try|
entryidx = binary_search(@continuum, hkey)
server = @continuum[entryidx].server
return server if server.alive?
break unless @failover
hkey = hash_for("#{try}#{key}")
end
else
server = @servers.first
return server if server && server.alive?
end
raise Dalli::RingError, "No server available"
end
def lock
@servers.each { |s| s.lock! }
begin
return yield
ensure
@servers.each { |s| s.unlock! }
end
end
private
def threadsafe!
@servers.each do |s|
s.extend(Dalli::Threadsafe)
end
end
# Map a key onto the 32-bit continuum space via CRC32.
def hash_for(raw_key)
  Zlib.crc32(raw_key)
end
# Number of continuum points a server receives: its share of
# total_servers * POINTS_PER_SERVER, proportional to its weight.
def entry_count_for(server, total_servers, total_weight)
  total_points = total_servers * POINTS_PER_SERVER * server.weight
  (total_points / Float(total_weight)).floor
end
# Native extension to perform the binary search within the continuum
# space. Fallback to a pure Ruby version if the compilation doesn't work.
# optional for performance and only necessary if you are using multiple
# memcached servers.
begin
require 'inline'
inline do |builder|
builder.c <<-EOM
int binary_search(VALUE ary, unsigned int r) {
long upper = RARRAY_LEN(ary) - 1;
long lower = 0;
long idx = 0;
ID value = rb_intern("value");
VALUE continuumValue;
unsigned int l;
while (lower <= upper) {
idx = (lower + upper) / 2;
continuumValue = rb_funcall(RARRAY_PTR(ary)[idx], value, 0);
l = NUM2UINT(continuumValue);
if (l == r) {
return idx;
}
else if (l > r) {
upper = idx - 1;
}
else {
lower = idx + 1;
}
}
return upper;
}
EOM
end
rescue LoadError
# Find the closest index in the Ring with value <= the given value
def binary_search(ary, value)
upper = ary.size - 1
lower = 0
idx = 0
while (lower <= upper) do
idx = (lower + upper) / 2
comp = ary[idx].value <=> value
if comp == 0
return idx
elsif comp > 0
upper = idx - 1
else
lower = idx + 1
end
end
return upper
end
end
# One point on the consistent-hash continuum: a 32-bit position plus
# the server that owns it.
class Entry
  attr_reader :value, :server

  def initialize(val, srv)
    @value = val
    @server = srv
  end
end
end
end

View File

@ -0,0 +1,696 @@
require 'socket'
require 'timeout'
module Dalli
class Server
attr_accessor :hostname
attr_accessor :port
attr_accessor :weight
attr_accessor :options
attr_reader :sock
DEFAULTS = {
# seconds between trying to contact a remote server
:down_retry_delay => 1,
# connect/read/write timeout for socket operations
:socket_timeout => 0.5,
# times a socket operation may fail before considering the server dead
:socket_max_failures => 2,
# amount of time to sleep between retries when a failure occurs
:socket_failure_delay => 0.01,
# max size of value in bytes (default is 1 MB, can be overriden with "memcached -I <size>")
:value_max_bytes => 1024 * 1024,
:compressor => Compressor,
# min byte size to attempt compression
:compression_min_size => 1024,
# max byte size for compression
:compression_max_size => false,
:serializer => Marshal,
:username => nil,
:password => nil,
:keepalive => true
}
def initialize(attribs, options = {})
(@hostname, @port, @weight) = parse_hostname(attribs)
@port ||= 11211
@port = Integer(@port)
@weight ||= 1
@weight = Integer(@weight)
@fail_count = 0
@down_at = nil
@last_down_at = nil
@options = DEFAULTS.merge(options)
@sock = nil
@msg = nil
@error = nil
@pid = nil
@inprogress = nil
end
# Human-readable "host:port" identifier for this server, used in logs.
def name
  [@hostname, @port].join(':')
end
# Chokepoint method for instrumentation
def request(op, *args)
verify_state
raise Dalli::NetworkError, "#{hostname}:#{port} is down: #{@error} #{@msg}. If you are sure it is running, ensure memcached version is > 1.4." unless alive?
begin
send(op, *args)
rescue Dalli::NetworkError
raise
rescue Dalli::MarshalError => ex
Dalli.logger.error "Marshalling error for key '#{args.first}': #{ex.message}"
Dalli.logger.error "You are trying to cache a Ruby object which cannot be serialized to memcached."
Dalli.logger.error ex.backtrace.join("\n\t")
false
rescue Dalli::DalliError
raise
rescue => ex
Dalli.logger.error "Unexpected exception in Dalli: #{ex.class.name}: #{ex.message}"
Dalli.logger.error "This is a bug in Dalli, please enter an issue in Github if it does not already exist."
Dalli.logger.error ex.backtrace.join("\n\t")
down!
end
end
def alive?
return true if @sock
if @last_down_at && @last_down_at + options[:down_retry_delay] >= Time.now
time = @last_down_at + options[:down_retry_delay] - Time.now
Dalli.logger.debug { "down_retry_delay not reached for #{hostname}:#{port} (%.3f seconds left)" % time }
return false
end
connect
!!@sock
rescue Dalli::NetworkError
false
end
def close
return unless @sock
@sock.close rescue nil
@sock = nil
@pid = nil
@inprogress = false
end
def lock!
end
def unlock!
end
def serializer
@options[:serializer]
end
def compressor
@options[:compressor]
end
# Start reading key/value pairs from this connection. This is usually called
# after a series of GETKQ commands. A NOOP is sent, and the server begins
# flushing responses for kv pairs that were found.
#
# Returns nothing.
def multi_response_start
verify_state
write_noop
@multi_buffer = ''
@position = 0
@inprogress = true
end
# Did the last call to #multi_response_start complete successfully?
def multi_response_completed?
@multi_buffer.nil?
end
# Attempt to receive and parse as many key/value pairs as possible
# from this server. After #multi_response_start, this should be invoked
# repeatedly whenever this server's socket is readable until
# #multi_response_completed?.
#
# Returns a Hash of kv pairs received.
def multi_response_nonblock
raise 'multi_response has completed' if @multi_buffer.nil?
@multi_buffer << @sock.read_available
buf = @multi_buffer
pos = @position
values = {}
while buf.bytesize - pos >= 24
header = buf.slice(pos, 24)
(key_length, _, body_length, cas) = header.unpack(KV_HEADER)
if key_length == 0
# all done!
@multi_buffer = nil
@position = nil
@inprogress = false
break
elsif buf.bytesize - pos >= 24 + body_length
flags = buf.slice(pos + 24, 4).unpack('N')[0]
key = buf.slice(pos + 24 + 4, key_length)
value = buf.slice(pos + 24 + 4 + key_length, body_length - key_length - 4) if body_length - key_length - 4 > 0
pos = pos + 24 + body_length
begin
values[key] = [deserialize(value, flags), cas]
rescue DalliError
end
else
# not enough data yet, wait for more
break
end
end
@position = pos
values
rescue SystemCallError, Timeout::Error, EOFError => e
failure!(e)
end
# Abort an earlier #multi_response_start. Used to signal an external
# timeout. The underlying socket is disconnected, and the exception is
# swallowed.
#
# Returns nothing.
def multi_response_abort
@multi_buffer = nil
@position = nil
@inprogress = false
failure!(RuntimeError.new('External timeout'))
rescue NetworkError
true
end
# NOTE: Additional public methods should be overridden in Dalli::Threadsafe
private
def verify_state
failure!(RuntimeError.new('Already writing to socket')) if @inprogress
failure!(RuntimeError.new('Cannot share client between multiple processes')) if @pid && @pid != Process.pid
end
def failure!(exception)
message = "#{hostname}:#{port} failed (count: #{@fail_count}) #{exception.class}: #{exception.message}"
Dalli.logger.info { message }
@fail_count += 1
if @fail_count >= options[:socket_max_failures]
down!
else
close
sleep(options[:socket_failure_delay]) if options[:socket_failure_delay]
raise Dalli::NetworkError, "Socket operation failed, retrying..."
end
end
def down!
close
@last_down_at = Time.now
if @down_at
time = Time.now - @down_at
Dalli.logger.debug { "#{hostname}:#{port} is still down (for %.3f seconds now)" % time }
else
@down_at = @last_down_at
Dalli.logger.warn { "#{hostname}:#{port} is down" }
end
@error = $! && $!.class.name
@msg = @msg || ($! && $!.message && !$!.message.empty? && $!.message)
raise Dalli::NetworkError, "#{hostname}:#{port} is down: #{@error} #{@msg}"
end
# Mark the server healthy again: log the downtime if we had recorded
# an outage, then reset all failure-tracking state.
def up!
  if @down_at
    downtime = Time.now - @down_at
    Dalli.logger.warn { "#{hostname}:#{port} is back (downtime was %.3f seconds)" % downtime }
  end

  @fail_count = 0
  @down_at = nil
  @last_down_at = nil
  @msg = nil
  @error = nil
end
def multi?
Thread.current[:dalli_multi]
end
# Fetch +key+. Returns the deserialized value, or nil on a miss.
def get(key)
  write([REQUEST, OPCODES[:get], key.bytesize, 0, 0, 0, key.bytesize, 0, 0, key].pack(FORMAT[:get]))
  generic_response(true)
end
# Pipeline one quiet getk (getkq) request per key in a single write;
# responses are collected later by #noop / #multi_response.
def send_multiget(keys)
  pipeline = keys.map do |key|
    [REQUEST, OPCODES[:getkq], key.bytesize, 0, 0, 0, key.bytesize, 0, 0, key].pack(FORMAT[:getkq])
  end
  # Could send noop here instead of in multi_response_start
  write(pipeline.join)
end
# Store +value+ under +key+, honoring the optimistic-locking +cas+ token.
# extlen is 8: a 4-byte flags word plus a 4-byte expiry precede key/value.
# Inside #multi the quiet :setq opcode is used and no response is read.
def set(key, value, ttl, cas, options)
(value, flags) = serialize(key, value, options)
ttl = sanitize_ttl(ttl)
guard_max_value(key, value) do
req = [REQUEST, OPCODES[multi? ? :setq : :set], key.bytesize, 8, 0, 0, value.bytesize + key.bytesize + 8, 0, cas, flags, ttl, key, value].pack(FORMAT[:set])
write(req)
cas_response unless multi?
end
end
# Store +value+ only if +key+ does not already exist. CAS is packed as 0
# (ADD has no existing item to race against). Same wire layout as #set.
def add(key, value, ttl, options)
(value, flags) = serialize(key, value, options)
ttl = sanitize_ttl(ttl)
guard_max_value(key, value) do
req = [REQUEST, OPCODES[multi? ? :addq : :add], key.bytesize, 8, 0, 0, value.bytesize + key.bytesize + 8, 0, 0, flags, ttl, key, value].pack(FORMAT[:add])
write(req)
cas_response unless multi?
end
end
# Store +value+ only if +key+ already exists, honoring +cas+. Same wire
# layout as #set; quiet :replaceq opcode inside #multi.
def replace(key, value, ttl, cas, options)
(value, flags) = serialize(key, value, options)
ttl = sanitize_ttl(ttl)
guard_max_value(key, value) do
req = [REQUEST, OPCODES[multi? ? :replaceq : :replace], key.bytesize, 8, 0, 0, value.bytesize + key.bytesize + 8, 0, cas, flags, ttl, key, value].pack(FORMAT[:replace])
write(req)
cas_response unless multi?
end
end
# Remove +key+ (with optional CAS token). Uses the quiet :deleteq opcode
# inside #multi; note the body layout is the same for both opcodes, so
# FORMAT[:delete] is used regardless.
def delete(key, cas)
  opcode = multi? ? :deleteq : :delete
  write([REQUEST, OPCODES[opcode], key.bytesize, 0, 0, 0, key.bytesize, 0, cas, key].pack(FORMAT[:delete]))
  generic_response unless multi?
end
# FLUSH: expire the entire cache, optionally delayed by +ttl+ seconds.
# extlen and bodylen are 4 for the single 4-byte expiration extra.
#
# Bug fix: the +ttl+ argument was previously ignored (a literal 0 was
# packed as the expiry), so delayed flushes always flushed immediately.
def flush(ttl)
  req = [REQUEST, OPCODES[:flush], 0, 4, 0, 0, 4, 0, 0, ttl].pack(FORMAT[:flush])
  write(req)
  generic_response
end
# Shared implementation for INCR/DECR. Extras are 20 bytes: an 8-byte
# delta, an 8-byte initial value, and a 4-byte expiry.
def decr_incr(opcode, key, count, ttl, default)
# 0xFFFFFFFF as the expiry tells memcached NOT to create missing keys;
# the operation simply misses instead.
expiry = default ? sanitize_ttl(ttl) : 0xFFFFFFFF
default ||= 0
# 64-bit quantities are packed as two 32-bit halves (see #split).
(h, l) = split(count)
(dh, dl) = split(default)
req = [REQUEST, OPCODES[opcode], key.bytesize, 20, 0, 0, key.bytesize + 20, 0, 0, h, l, dh, dl, expiry, key].pack(FORMAT[opcode])
write(req)
body = generic_response
# Response body is the new counter value as a big-endian 64-bit integer.
body ? body.unpack('Q>').first : body
end
# Decrement the counter at +key+ by +count+ (see #decr_incr for semantics).
def decr(key, count, ttl, default)
  decr_incr(:decr, key, count, ttl, default)
end
# Increment the counter at +key+ by +count+ (see #decr_incr for semantics).
def incr(key, count, ttl, default)
  decr_incr(:incr, key, count, ttl, default)
end
# append/prepend share a request shape: no extras, raw value bytes
# concatenated onto whatever is already stored.
def write_append_prepend(opcode, key, value)
write_generic [REQUEST, OPCODES[opcode], key.bytesize, 0, 0, 0, value.bytesize + key.bytesize, 0, 0, key, value].pack(FORMAT[opcode])
end
# Send an already-packed request and parse the standard response.
def write_generic(payload)
  write(payload)
  generic_response
end
# Send a NOOP request without reading its response (the caller decides
# when and how to drain replies).
def write_noop
  write([REQUEST, OPCODES[:noop], 0, 0, 0, 0, 0, 0, 0].pack(FORMAT[:noop]))
end
# Noop is a keepalive operation but also used to demarcate the end of a set of pipelined commands.
# We need to read all the responses at once.
#
# Returns the Hash of key => value pairs accumulated by #multi_response.
def noop
write_noop
multi_response
end
# Append raw +value+ bytes to the data already stored at +key+.
def append(key, value)
  write_append_prepend(:append, key, value)
end
# Prepend raw +value+ bytes to the data already stored at +key+.
def prepend(key, value)
  write_append_prepend(:prepend, key, value)
end
# STAT command; +info+ selects an optional sub-group (e.g. 'items').
# Returns a Hash of stat name => raw string value.
def stats(info='')
  write([REQUEST, OPCODES[:stat], info.bytesize, 0, 0, 0, info.bytesize, 0, 0, info].pack(FORMAT[:stat]))
  keyvalue_response
end
# STAT reset — clears the server's statistics counters.
def reset_stats
  cmd = 'reset'
  write_generic [REQUEST, OPCODES[:stat], cmd.bytesize, 0, 0, 0, cmd.bytesize, 0, 0, cmd].pack(FORMAT[:stat])
end
# Fetch +key+ returning [value, cas] so callers can perform optimistic
# (compare-and-swap) updates.
def cas(key)
  write([REQUEST, OPCODES[:get], key.bytesize, 0, 0, 0, key.bytesize, 0, 0, key].pack(FORMAT[:get]))
  data_cas_response
end
# Ask the server for its version string. VERSION has an empty body, so
# the header-only :noop format is reused (OP_FORMAT[:version] and
# OP_FORMAT[:noop] are both '').
def version
write_generic [REQUEST, OPCODES[:version], 0, 0, 0, 0, 0, 0, 0].pack(FORMAT[:noop])
end
# TOUCH: update an item's expiry without fetching it. Extras are the
# 4-byte TTL.
def touch(key, ttl)
ttl = sanitize_ttl(ttl)
write_generic [REQUEST, OPCODES[:touch], key.bytesize, 4, 0, 0, key.bytesize + 4, 0, 0, ttl, key].pack(FORMAT[:touch])
end
# http://www.hjp.at/zettel/m/memcached_flags.rxml
# Looks like most clients use bit 0 to indicate native language serialization
# and bit 1 to indicate gzip compression.
# These bits are OR'd into the 4-byte flags word stored with every value
# (#serialize) and checked again on the way out (#deserialize).
FLAG_SERIALIZED = 0x1
FLAG_COMPRESSED = 0x2
# Prepare +value+ for the wire. Returns [payload, flags]: the payload is
# marshalled via the configured serializer unless options[:raw], and
# compressed when :compress is on and the size thresholds are met.
def serialize(key, value, options=nil)
marshalled = false
value = unless options && options[:raw]
marshalled = true
begin
self.serializer.dump(value)
rescue => ex
# Marshalling can throw several different types of generic Ruby exceptions.
# Convert to a specific exception so we can special case it higher up the stack.
exc = Dalli::MarshalError.new(ex.message)
exc.set_backtrace ex.backtrace
raise exc
end
else
# :raw values are coerced to a String and stored as-is.
value.to_s
end
compressed = false
if @options[:compress] && value.bytesize >= @options[:compression_min_size] &&
(!@options[:compression_max_size] || value.bytesize <= @options[:compression_max_size])
value = self.compressor.compress(value)
compressed = true
end
flags = 0
flags |= FLAG_COMPRESSED if compressed
flags |= FLAG_SERIALIZED if marshalled
[value, flags]
end
# Reverse #serialize: decompress then unmarshal according to the stored
# flags. Marshal failures are narrowed by message-matching so unrelated
# TypeError/ArgumentError bugs still surface.
def deserialize(value, flags)
value = self.compressor.decompress(value) if (flags & FLAG_COMPRESSED) != 0
value = self.serializer.load(value) if (flags & FLAG_SERIALIZED) != 0
value
rescue TypeError
raise if $!.message !~ /needs to have method `_load'|exception class\/object expected|instance of IO needed|incompatible marshal file format/
raise UnmarshalError, "Unable to unmarshal value: #{$!.message}"
rescue ArgumentError
raise if $!.message !~ /undefined class|marshal data too short/
raise UnmarshalError, "Unable to unmarshal value: #{$!.message}"
rescue Zlib::Error
raise UnmarshalError, "Unable to uncompress value: #{$!.message}"
end
# Read a response carrying data plus a CAS token (used by #cas).
# Returns [value_or_nil, cas]; status 1 (key not found) yields nil.
def data_cas_response
(extras, _, status, count, _, cas) = read_header.unpack(CAS_HEADER)
data = read(count) if count > 0
if status == 1
nil
elsif status != 0
raise Dalli::DalliError, "Response error #{status}: #{RESPONSE_CODES[status]}"
elsif data
# The first +extras+ bytes are the 32-bit flags; the rest is payload.
flags = data[0...extras].unpack('N')[0]
value = data[extras..-1]
data = deserialize(value, flags)
end
[data, cas]
end
# unpack templates for the 24-byte binary-protocol response header:
#   CAS_HEADER    -> extras len, data type, status, body len, opaque, cas
#   NORMAL_HEADER -> extras len, data type, status, body len
#   KV_HEADER     -> key len (offset 2), status, body len, cas (offset 16)
CAS_HEADER = '@4CCnNNQ'
NORMAL_HEADER = '@4CCnN'
KV_HEADER = '@2n@6nN@16Q'
# Yields only when +value+ fits under the configured size cap
# (options[:value_max_bytes]); otherwise warns and returns false without
# sending anything to the server.
def guard_max_value(key, value)
  max_bytes = @options[:value_max_bytes]
  return yield if value.bytesize <= max_bytes
  Dalli.logger.warn "Value for #{key} over max size: #{max_bytes} <= #{value.bytesize}"
  false
end
# https://code.google.com/p/memcached/wiki/NewCommands#Standard_Protocol
# > An expiration time, in seconds. Can be up to 30 days. After 30 days, is treated as a unix timestamp of an exact date.
MAX_ACCEPTABLE_EXPIRATION_INTERVAL = 30*24*60*60 # 30 days
# Memcached treats expirations beyond 30 days as absolute Unix
# timestamps, so convert longer relative TTLs into one.
def sanitize_ttl(ttl)
if ttl > MAX_ACCEPTABLE_EXPIRATION_INTERVAL
Dalli.logger.debug "Expiration interval too long for Memcached, converting to an expiration timestamp"
Time.now.to_i + ttl
else
ttl
end
end
# Parse a standard response. Returns:
#   nil   -- status 1 (key not found)
#   false -- status 2 (key exists) or 5 (item not stored)
#   value -- body present (deserialized when +unpack+ is true)
#   true  -- success with no body
def generic_response(unpack=false)
(extras, _, status, count) = read_header.unpack(NORMAL_HEADER)
data = read(count) if count > 0
if status == 1
nil
elsif status == 2 || status == 5
false # Not stored, normal status for add operation
elsif status != 0
raise Dalli::DalliError, "Response error #{status}: #{RESPONSE_CODES[status]}"
elsif data
# First +extras+ bytes are the 32-bit flags; the rest is the payload.
flags = data[0...extras].unpack('N')[0]
value = data[extras..-1]
unpack ? deserialize(value, flags) : value
else
true
end
end
# Parse a response from a storage op, returning the new CAS token on
# success, nil on miss (status 1), or false when not stored (2/5).
def cas_response
(_, _, status, count, _, cas) = read_header.unpack(CAS_HEADER)
read(count) if count > 0 # this is potential data that we don't care about
if status == 1
nil
elsif status == 2 || status == 5
false # Not stored, normal status for add operation
elsif status != 0
raise Dalli::DalliError, "Response error #{status}: #{RESPONSE_CODES[status]}"
else
cas
end
end
# Read a sequence of key/value responses (STAT) terminated by a
# zero-length key. Returns a Hash of raw string key => value.
def keyvalue_response
hash = {}
loop do
(key_length, _, body_length, _) = read_header.unpack(KV_HEADER)
return hash if key_length == 0
key = read(key_length)
# body_length includes the key, so the value is the remainder (if any).
value = read(body_length - key_length) if body_length - key_length > 0
hash[key] = value
end
end
# Read pipelined getkq responses up to the terminating noop reply (which
# has a zero-length key). Each body is 4 bytes of flags, then the key,
# then the value. Returns a Hash of key => deserialized value.
def multi_response
hash = {}
loop do
(key_length, _, body_length, _) = read_header.unpack(KV_HEADER)
return hash if key_length == 0
flags = read(4).unpack('N')[0]
key = read(key_length)
value = read(body_length - key_length - 4) if body_length - key_length - 4 > 0
hash[key] = deserialize(value, flags)
end
end
# Write raw bytes to the socket, funneling IO errors through #failure!.
def write(bytes)
begin
# Mark the socket busy so verify_state can detect interrupted writes.
# Deliberately only cleared on success: on failure the connection is
# torn down by failure!/down!.
@inprogress = true
result = @sock.write(bytes)
@inprogress = false
result
rescue SystemCallError, Timeout::Error => e
failure!(e)
end
end
# Read exactly +count+ bytes from the socket, funneling IO errors
# (including EOF from a dropped connection) through #failure!.
def read(count)
begin
@inprogress = true
data = @sock.readfull(count)
@inprogress = false
data
rescue SystemCallError, Timeout::Error, EOFError => e
failure!(e)
end
end
# Read the fixed 24-byte binary-protocol response header; a nil read
# means the peer vanished without sending one.
def read_header
  header = read(24)
  raise(Dalli::NetworkError, 'No response') unless header
  header
end
# Establish the TCP connection, record the owning PID (to detect reuse
# across forks), verify liveness by fetching the server version, run
# SASL auth when configured, and mark the server up.
def connect
Dalli.logger.debug { "Dalli::Server#connect #{hostname}:#{port}" }
begin
@pid = Process.pid
@sock = KSocket.open(hostname, port, self, options)
@version = version # trigger actual connect
sasl_authentication if need_auth?
up!
rescue Dalli::DalliError # SASL auth failure
raise
rescue SystemCallError, Timeout::Error, EOFError, SocketError => e
# SocketError = DNS resolution failure
failure!(e)
end
end
# Split a 64-bit integer into [high 32 bits, low 32 bits] so it can be
# packed as two big-endian 'N' fields.
def split(n)
  [n >> 32, n & 0xFFFFFFFF]
end
# Binary-protocol magic bytes for request/response packets.
REQUEST = 0x80
RESPONSE = 0x81
# Server status code -> human-readable message.
RESPONSE_CODES = {
0 => 'No error',
1 => 'Key not found',
2 => 'Key exists',
3 => 'Value too large',
4 => 'Invalid arguments',
5 => 'Item not stored',
6 => 'Incr/decr on a non-numeric value',
0x20 => 'Authentication required',
0x81 => 'Unknown command',
0x82 => 'Out of memory',
}
# Command opcodes; the *q variants are the "quiet" (no-response) forms
# used inside Dalli::Client#multi.
OPCODES = {
:get => 0x00,
:set => 0x01,
:add => 0x02,
:replace => 0x03,
:delete => 0x04,
:incr => 0x05,
:decr => 0x06,
:flush => 0x08,
:noop => 0x0A,
:version => 0x0B,
:getkq => 0x0D,
:append => 0x0E,
:prepend => 0x0F,
:stat => 0x10,
:setq => 0x11,
:addq => 0x12,
:replaceq => 0x13,
:deleteq => 0x14,
:incrq => 0x15,
:decrq => 0x16,
:auth_negotiation => 0x20,
:auth_request => 0x21,
:auth_continue => 0x22,
:touch => 0x1C,
}
# Request header pack template: magic, opcode, key len, extras len,
# data type, vbucket, total body len, opaque, cas.
HEADER = "CCnCCnNNQ"
# Per-opcode body layout appended to HEADER ('a*' = raw bytes,
# 'N' = 32-bit big-endian).
OP_FORMAT = {
:get => 'a*',
:set => 'NNa*a*',
:add => 'NNa*a*',
:replace => 'NNa*a*',
:delete => 'a*',
:incr => 'NNNNNa*',
:decr => 'NNNNNa*',
:flush => 'N',
:noop => '',
:getkq => 'a*',
:version => '',
:stat => 'a*',
:append => 'a*a*',
:prepend => 'a*a*',
:auth_request => 'a*a*',
:auth_continue => 'a*a*',
:touch => 'Na*',
}
# Full pack template per opcode: header + body.
FORMAT = OP_FORMAT.inject({}) { |memo, (k, v)| memo[k] = HEADER + v; memo }
#######
# SASL authentication support for NorthScale
#######

# True (truthy) when SASL auth should be attempted. Delegates to
# #username so the option/env-var lookup lives in exactly one place —
# it previously duplicated the expression in #username verbatim.
def need_auth?
  username
end
# SASL username: the explicit :username option wins, falling back to the
# MEMCACHE_USERNAME environment variable.
def username
  @options.fetch(:username, nil) || ENV.fetch('MEMCACHE_USERNAME', nil)
end
# SASL password: the explicit :password option wins, falling back to the
# MEMCACHE_PASSWORD environment variable.
def password
  @options.fetch(:password, nil) || ENV.fetch('MEMCACHE_PASSWORD', nil)
end
# Authenticate via SASL PLAIN (the only supported mechanism).
# Flow: list mechanisms (auth_negotiation), then send the PLAIN token
# "\x00user\x00password" (auth_request). Raises on failure; two-step
# challenge/response mechanisms are not implemented.
def sasl_authentication
Dalli.logger.info { "Dalli/SASL authenticating as #{username}" }
# negotiate
req = [REQUEST, OPCODES[:auth_negotiation], 0, 0, 0, 0, 0, 0, 0].pack(FORMAT[:noop])
write(req)
(extras, type, status, count) = read_header.unpack(NORMAL_HEADER)
raise Dalli::NetworkError, "Unexpected message format: #{extras} #{count}" unless extras == 0 && count > 0
content = read(count)
# 0x81 = unknown command: the server has no SASL support, nothing to do.
return (Dalli.logger.debug("Authentication not required/supported by server")) if status == 0x81
mechanisms = content.split(' ')
raise NotImplementedError, "Dalli only supports the PLAIN authentication mechanism" if !mechanisms.include?('PLAIN')
# request
mechanism = 'PLAIN'
msg = "\x0#{username}\x0#{password}"
req = [REQUEST, OPCODES[:auth_request], mechanism.bytesize, 0, 0, 0, mechanism.bytesize + msg.bytesize, 0, 0, mechanism, msg].pack(FORMAT[:auth_request])
write(req)
(extras, type, status, count) = read_header.unpack(NORMAL_HEADER)
raise Dalli::NetworkError, "Unexpected message format: #{extras} #{count}" unless extras == 0 && count > 0
content = read(count)
return Dalli.logger.info("Dalli/SASL: #{content}") if status == 0
# 0x21 = auth continue, i.e. the server wants a second step.
raise Dalli::DalliError, "Error authenticating: #{status}" unless status == 0x21
raise NotImplementedError, "No two-step authentication mechanisms supported"
# (step, msg) = sasl.receive('challenge', content)
# raise Dalli::NetworkError, "Authentication failed" if sasl.failed? || step != 'response'
end
# Parse "host[:port[:weight]]", supporting bracketed IPv6 literals
# ("[::1]:11211"). Returns [hostname, port_string_or_nil, weight_string_or_nil].
#
# Bug fix: a malformed string previously produced a nil MatchData and a
# confusing NoMethodError; now it raises an informative ArgumentError.
def parse_hostname(str)
  res = str.match(/\A(\[([\h:]+)\]|[^:]+)(:(\d+))?(:(\d+))?\z/)
  raise ArgumentError, "Could not parse hostname #{str}" if res.nil?
  return res[2] || res[1], res[4], res[6]
end
end
end

View File

@ -0,0 +1,108 @@
begin
require 'kgio'
puts "Using kgio socket IO" if defined?($TESTING) && $TESTING
# Kgio-backed socket, preferred when the kgio gem is installed.
class Dalli::Server::KSocket < Kgio::Socket
attr_accessor :options, :server
# Block (up to the configured socket timeout) until readable/writable.
def kgio_wait_readable
IO.select([self], nil, nil, options[:socket_timeout]) || raise(Timeout::Error, "IO timeout")
end
def kgio_wait_writable
IO.select(nil, [self], nil, options[:socket_timeout]) || raise(Timeout::Error, "IO timeout")
end
# Open a TCP connection with Nagle disabled and optional keepalive.
def self.open(host, port, server, options = {})
addr = Socket.pack_sockaddr_in(port, host)
sock = start(addr)
sock.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY, true)
sock.setsockopt(Socket::SOL_SOCKET, Socket::SO_KEEPALIVE, true) if options[:keepalive]
sock.options = options
sock.server = server
# start() connects non-blocking; waiting writable completes the connect.
sock.kgio_wait_writable
sock
end
alias :write :kgio_write
# Read exactly +count+ bytes (kgio_read! raises EOFError on disconnect).
def readfull(count)
value = ''
loop do
value << kgio_read!(count - value.bytesize)
break if value.bytesize == count
end
value
end
# Drain whatever bytes are currently available without blocking.
def read_available
value = ''
loop do
ret = kgio_tryread(8196)
case ret
when nil
raise EOFError, 'end of stream'
when :wait_readable
break
else
value << ret
end
end
value
end
end
if ::Kgio.respond_to?(:wait_readable=)
::Kgio.wait_readable = :kgio_wait_readable
::Kgio.wait_writable = :kgio_wait_writable
end
rescue LoadError
puts "Using standard socket IO (#{RUBY_DESCRIPTION})" if defined?($TESTING) && $TESTING
# Fallback implementation on plain TCPSocket with non-blocking reads.
class Dalli::Server::KSocket < TCPSocket
attr_accessor :options, :server
def self.open(host, port, server, options = {})
Timeout.timeout(options[:socket_timeout]) do
sock = new(host, port)
sock.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY, true)
sock.setsockopt(Socket::SOL_SOCKET, Socket::SO_KEEPALIVE, true) if options[:keepalive]
sock.options = { :host => host, :port => port }.merge(options)
sock.server = server
sock
end
end
# Read exactly +count+ bytes, waiting (up to the socket timeout) and
# retrying whenever the non-blocking read would block.
def readfull(count)
value = ''
begin
loop do
value << read_nonblock(count - value.bytesize)
break if value.bytesize == count
end
rescue Errno::EAGAIN, Errno::EWOULDBLOCK
if IO.select([self], nil, nil, options[:socket_timeout])
retry
else
raise Timeout::Error, "IO timeout: #{options.inspect}"
end
end
value
end
# Drain currently-available bytes without blocking.
# NOTE(review): 8196 (here and in the kgio variant) looks like a typo
# for 8192 -- harmless, it is only a read chunk size.
def read_available
value = ''
loop do
begin
value << read_nonblock(8196)
rescue Errno::EAGAIN, Errno::EWOULDBLOCK
break
end
end
value
end
end
end

View File

@ -0,0 +1,3 @@
module Dalli
# Gem version string.
VERSION = '2.7.2'
end

View File

@ -0,0 +1,75 @@
require 'rack/session/abstract/id'
require 'dalli'
module Rack
module Session
# Rack session store backed by a Dalli::Client (memcached).
class Dalli < Abstract::ID
attr_reader :pool, :mutex
DEFAULT_OPTIONS = Abstract::ID::DEFAULT_OPTIONS.merge \
:namespace => 'rack:session',
:memcache_server => 'localhost:11211'
# Builds the client from options[:cache] or from :memcache_server;
# alive! raises immediately when no server is reachable.
def initialize(app, options={})
super
@mutex = Mutex.new
mserv = @default_options[:memcache_server]
# Only pass through options that Rack::Session knows about.
mopts = @default_options.reject{|k,v| !DEFAULT_OPTIONS.include? k }
@pool = options[:cache] || ::Dalli::Client.new(mserv, mopts)
@pool.alive!
end
# Keep generating ids until one is not already present in memcached.
def generate_sid
loop do
sid = super
break sid unless @pool.get(sid)
end
end
# Returns [sid, session]; creates (and reserves via add) a fresh session
# when the sid is missing, blank, or unknown to memcached.
def get_session(env, sid)
with_lock(env, [nil, {}]) do
unless sid and !sid.empty? and session = @pool.get(sid)
sid, session = generate_sid, {}
unless @pool.add(sid, session)
raise "Session collision on '#{sid.inspect}'"
end
end
[sid, session]
end
end
def set_session(env, session_id, new_session, options)
return false unless session_id
expiry = options[:expire_after]
# +1 pads the memcached TTL so the entry outlives the cookie slightly;
# 0 (no :expire_after) means "never expire".
expiry = expiry.nil? ? 0 : expiry + 1
with_lock(env, false) do
@pool.set session_id, new_session, expiry
session_id
end
end
# Deletes the session; returns a fresh sid unless :drop was requested.
def destroy_session(env, session_id, options)
with_lock(env) do
@pool.delete(session_id)
generate_sid unless options[:drop]
end
end
# Runs the block under the mutex when Rack reports multithreading;
# swallows connectivity errors and returns +default+ instead.
# NOTE(review): `@mutex.locked?` is also true when ANOTHER thread holds
# the lock, and unlocking a mutex you don't own raises -- presumably
# safe because unlocked paths only occur single-threaded, but confirm.
def with_lock(env, default=nil)
@mutex.lock if env['rack.multithread']
yield
rescue ::Dalli::DalliError, Errno::ECONNREFUSED
raise if $!.message =~ /undefined class/
if $VERBOSE
warn "#{self} is unable to find memcached server."
warn $!.inspect
end
default
ensure
@mutex.unlock if @mutex.locked?
end
end
end
end

View File

@ -0,0 +1,242 @@
require 'helper'
require 'benchmark'
require 'active_support/cache/dalli_store'
# Benchmark suite comparing raw Dalli::Client and the Rails DalliStore
# front-end across common operation mixes. Not a correctness test; it
# just prints Benchmark timings against a local memcached on port 19122.
describe 'performance' do
before do
puts "Testing #{Dalli::VERSION} with #{RUBY_DESCRIPTION}"
# We'll use a simple @value to try to avoid spending time in Marshal,
# which is a constant penalty that both clients have to pay
@value = []
@marshalled = Marshal.dump(@value)
# Two aliases of the same local server exercise the ring with 2 nodes.
@servers = ['127.0.0.1:19122', 'localhost:19122']
@key1 = "Short"
@key2 = "Sym1-2-3::45"*8
@key3 = "Long"*40
@key4 = "Medium"*8
# 5 and 6 are only used for multiget miss test
@key5 = "Medium2"*8
@key6 = "Long3"*40
@counter = 'counter'
end
it 'runs benchmarks' do
memcached do
Benchmark.bm(37) do |x|
n = 2500
# --- ActiveSupport::Cache::DalliStore, with and without LocalCache ---
@ds = ActiveSupport::Cache::DalliStore.new(@servers)
x.report("mixed:rails:dalli") do
n.times do
@ds.read @key1
@ds.write @key2, @value
@ds.fetch(@key3) { @value }
@ds.fetch(@key2) { @value }
@ds.fetch(@key1) { @value }
@ds.write @key2, @value, :unless_exists => true
@ds.delete @key2
@ds.increment @counter, 1, :initial => 100
@ds.increment @counter, 1, :expires_in => 12
@ds.decrement @counter, 1
end
end
x.report("mixed:rails-localcache:dalli") do
n.times do
@ds.with_local_cache do
@ds.read @key1
@ds.write @key2, @value
@ds.fetch(@key3) { @value }
@ds.fetch(@key2) { @value }
@ds.fetch(@key1) { @value }
@ds.write @key2, @value, :unless_exists => true
@ds.delete @key2
@ds.increment @counter, 1, :initial => 100
@ds.increment @counter, 1, :expires_in => 12
@ds.decrement @counter, 1
end
end
end
@ds.clear
sizeable_data = "<marquee>some view partial data</marquee>" * 50
[@key1, @key2, @key3, @key4, @key5, @key6].each do |key|
@ds.write(key, sizeable_data)
end
x.report("read_multi_big:rails:dalli") do
n.times do
@ds.read_multi @key1, @key2, @key3, @key4
@ds.read @key1
@ds.read @key2
@ds.read @key3
@ds.read @key4
@ds.read @key1
@ds.read @key2
@ds.read @key3
@ds.read_multi @key1, @key2, @key3
end
end
x.report("read_multi_big:rails-localcache:dalli") do
n.times do
@ds.with_local_cache do
@ds.read_multi @key1, @key2, @key3, @key4
@ds.read @key1
@ds.read @key2
@ds.read @key3
@ds.read @key4
end
@ds.with_local_cache do
@ds.read @key1
@ds.read @key2
@ds.read @key3
@ds.read_multi @key1, @key2, @key3
end
end
end
# --- Raw Dalli::Client; fresh client per scenario to avoid carryover ---
@m = Dalli::Client.new(@servers)
x.report("set:plain:dalli") do
n.times do
@m.set @key1, @marshalled, 0, :raw => true
@m.set @key2, @marshalled, 0, :raw => true
@m.set @key3, @marshalled, 0, :raw => true
@m.set @key1, @marshalled, 0, :raw => true
@m.set @key2, @marshalled, 0, :raw => true
@m.set @key3, @marshalled, 0, :raw => true
end
end
@m = Dalli::Client.new(@servers)
x.report("setq:plain:dalli") do
@m.multi do
n.times do
@m.set @key1, @marshalled, 0, :raw => true
@m.set @key2, @marshalled, 0, :raw => true
@m.set @key3, @marshalled, 0, :raw => true
@m.set @key1, @marshalled, 0, :raw => true
@m.set @key2, @marshalled, 0, :raw => true
@m.set @key3, @marshalled, 0, :raw => true
end
end
end
@m = Dalli::Client.new(@servers)
x.report("set:ruby:dalli") do
n.times do
@m.set @key1, @value
@m.set @key2, @value
@m.set @key3, @value
@m.set @key1, @value
@m.set @key2, @value
@m.set @key3, @value
end
end
@m = Dalli::Client.new(@servers)
x.report("get:plain:dalli") do
n.times do
@m.get @key1, :raw => true
@m.get @key2, :raw => true
@m.get @key3, :raw => true
@m.get @key1, :raw => true
@m.get @key2, :raw => true
@m.get @key3, :raw => true
end
end
@m = Dalli::Client.new(@servers)
x.report("get:ruby:dalli") do
n.times do
@m.get @key1
@m.get @key2
@m.get @key3
@m.get @key1
@m.get @key2
@m.get @key3
end
end
@m = Dalli::Client.new(@servers)
x.report("multiget:ruby:dalli") do
n.times do
# We don't use the keys array because splat is slow
@m.get_multi @key1, @key2, @key3, @key4, @key5, @key6
end
end
@m = Dalli::Client.new(@servers)
x.report("missing:ruby:dalli") do
n.times do
begin @m.delete @key1; rescue; end
begin @m.get @key1; rescue; end
begin @m.delete @key2; rescue; end
begin @m.get @key2; rescue; end
begin @m.delete @key3; rescue; end
begin @m.get @key3; rescue; end
end
end
@m = Dalli::Client.new(@servers)
x.report("mixed:ruby:dalli") do
n.times do
@m.set @key1, @value
@m.set @key2, @value
@m.set @key3, @value
@m.get @key1
@m.get @key2
@m.get @key3
@m.set @key1, @value
@m.get @key1
@m.set @key2, @value
@m.get @key2
@m.set @key3, @value
@m.get @key3
end
end
@m = Dalli::Client.new(@servers)
x.report("mixedq:ruby:dalli") do
@m.multi do
n.times do
@m.set @key1, @value
@m.set @key2, @value
@m.set @key3, @value
@m.get @key1
@m.get @key2
@m.get @key3
@m.set @key1, @value
@m.get @key1
@m.set @key2, @value
@m.replace @key2, @value
@m.delete @key3
@m.add @key3, @value
@m.get @key2
@m.set @key3, @value
@m.get @key3
end
end
end
@m = Dalli::Client.new(@servers)
x.report("incr:ruby:dalli") do
counter = 'foocount'
n.times do
@m.incr counter, 1, 0, 1
end
n.times do
@m.decr counter, 1
end
# Sanity check: equal incr/decr runs must land back on zero.
assert_equal 0, @m.incr(counter, 0)
end
end
end
end
end

View File

@ -0,0 +1,55 @@
# Global test setup: enables test-mode diagnostics in lib code.
$TESTING = true
require 'rubygems'
# require 'simplecov'
# SimpleCov.start
require 'minitest/pride'
require 'minitest/autorun'
require 'mocha/setup'
require 'memcached_mock'
# Password database for the SASL-enabled memcached used by auth tests.
ENV['MEMCACHED_SASL_PWDB'] = "#{File.dirname(__FILE__)}/sasldb"
# CI can pin a Rails version via RAILS_VERSION; defaults to any 3.x+.
WANT_RAILS_VERSION = ENV['RAILS_VERSION'] || '>= 3.0.0'
gem 'rails', WANT_RAILS_VERSION
require 'rails'
puts "Testing with Rails #{Rails.version}"
require 'dalli'
require 'logger'
# Keep test output quiet: log errors only.
Dalli.logger = Logger.new(STDOUT)
Dalli.logger.level = Logger::ERROR
# Shared helpers mixed into every spec.
class MiniTest::Spec
include MemcachedMock::Helper
# Assert that the block raises +error+ and, when given, that the
# exception message matches +regexp+.
def assert_error(error, regexp=nil, &block)
ex = assert_raises(error, &block)
assert_match(regexp, ex.message, "#{ex.class.name}: #{ex.message}\n#{ex.backtrace.join("\n\t")}")
end
# Successful CAS-returning ops yield a positive Integer (the CAS token).
def op_cas_succeeds(rsp)
rsp.is_a?(Integer) && rsp > 0
end
def op_replace_succeeds(rsp)
rsp.is_a?(Integer) && rsp > 0
end
# add and set must have the same return value because of DalliStore#write_entry
def op_addset_succeeds(rsp)
rsp.is_a?(Integer) && rsp > 0
end
# Lazily require ActiveSupport so specs that don't need it stay light.
def with_activesupport
require 'active_support/all'
require 'active_support/cache/dalli_store'
yield
end
# Lazily require ActionPack for session-store specs.
def with_actionpack
require 'action_dispatch'
require 'action_controller'
yield
end
end

View File

@ -0,0 +1,121 @@
require "socket"
# PIDs of memcached daemons started by the helpers, keyed by port, so we
# don't spawn duplicates and can kill them in memcached_kill / at_exit.
$started = {}
# Test doubles and process-management helpers for driving specs against
# either a scripted TCP socket or a real local memcached daemon.
module MemcachedMock
  # Accept a single connection on +port+ and hand the session socket to
  # the block (which scripts the server side of the conversation).
  def self.start(port=19123, &block)
    server = TCPServer.new("localhost", port)
    session = server.accept
    block.call session
  end

  # Listen on +port+ but wait +wait+ seconds before yielding the server
  # socket — used to exercise client connect timeouts.
  def self.delayed_start(port=19123, wait=1, &block)
    server = TCPServer.new("localhost", port)
    sleep wait
    block.call server
  end

  module Helper
    # Forks the current process and starts a new mock Memcached server on
    # port 22122.
    #
    #   memcached_mock(lambda {|sock| socket.write('123') }) do
    #     assert_equal "PONG", Dalli::Client.new('localhost:22122').get('abc')
    #   end
    #
    def memcached_mock(proc, meth = :start)
      return unless supports_fork?
      begin
        pid = fork do
          trap("TERM") { exit }
          MemcachedMock.send(meth) do |*args|
            proc.call(*args)
          end
        end
        sleep 0.3 # Give time for the socket to start listening.
        yield
      ensure
        if pid
          Process.kill("TERM", pid)
          Process.wait(pid)
        end
      end
    end

    PATHS = %w(
      /usr/local/bin/
      /opt/local/bin/
      /usr/bin/
    )

    # Locate a memcached 1.4+ binary: first on the PATH, then in the
    # well-known PATHS above. Returns the directory prefix to prepend to
    # the binary name ('' when found via PATH).
    #
    # Bug fixes: the PATHS loop previously re-ran the bare `memcached`
    # command (ignoring +path+, so fallback locations were never actually
    # probed), and the first regexp used unescaped dots.
    # NOTE(review): the `$1 > '1.4'` string comparison mis-orders versions
    # like 1.10.x — tolerable for a test helper, but worth confirming.
    def find_memcached
      output = `memcached -h | head -1`.strip
      if output && output =~ /^memcached (\d\.\d\.\d+)/ && $1 > '1.4'
        return (puts "Found #{output} in PATH"; '')
      end
      PATHS.each do |path|
        output = `#{path}memcached -h | head -1`.strip
        if output && output =~ /^memcached (\d\.\d\.\d+)/ && $1 > '1.4'
          return (puts "Found #{output} in #{path}"; path)
        end
      end
      raise Errno::ENOENT, "Unable to find memcached 1.4+ locally"
    end

    # Start (or reuse) a real memcached on +port+ and yield a client that
    # addresses it via two aliases (exercises the 2-node ring).
    def memcached(port=19122, args='', options={})
      memcached_server(port, args)
      yield Dalli::Client.new(["localhost:#{port}", "127.0.0.1:#{port}"], options)
    end

    # Same as #memcached but with the CAS client extensions loaded.
    def memcached_cas(port=19122, args='', options={})
      memcached_server(port, args)
      require 'dalli/cas/client'
      yield Dalli::Client.new(["localhost:#{port}", "127.0.0.1:#{port}"], options)
    end

    # Spawn one memcached daemon per port for the whole test run; each is
    # TERM'd at process exit.
    def memcached_server(port=19122, args='')
      Memcached.path ||= find_memcached
      cmd = "#{Memcached.path}memcached #{args} -p #{port}"
      $started[port] ||= begin
        #puts "Starting: #{cmd}..."
        pid = IO.popen(cmd).pid
        at_exit do
          begin
            Process.kill("TERM", pid)
            Process.wait(pid)
          rescue Errno::ECHILD, Errno::ESRCH
          end
        end
        sleep 0.1
        pid
      end
    end

    # fork(2) is unavailable on JRuby.
    def supports_fork?
      !defined?(RUBY_ENGINE) || RUBY_ENGINE != 'jruby'
    end

    # Terminate the daemon started for +port+, if any.
    def memcached_kill(port)
      pid = $started.delete(port)
      if pid
        begin
          Process.kill("TERM", pid)
          Process.wait(pid)
        rescue Errno::ECHILD, Errno::ESRCH
        end
      end
    end
  end
end
# Holds the filesystem prefix where the memcached binary was found
# (set lazily by MemcachedMock::Helper#memcached_server).
module Memcached
class << self
attr_accessor :path
end
end

View File

@ -0,0 +1 @@
testuser:testtest:::::::

View File

@ -0,0 +1,439 @@
# encoding: utf-8
require 'helper'
require 'connection_pool'
# Minimal model double exposing the #cache_key interface that
# ActiveSupport::Cache uses to derive cache keys from objects.
class MockUser
  def cache_key
    'users/1/21348793847982314'
  end
end
describe 'ActiveSupport' do
describe 'active_support caching' do
it 'has accessible options' do
@dalli = ActiveSupport::Cache.lookup_store(:dalli_store, 'localhost:19122', :expires_in => 5.minutes, :frob => 'baz')
assert_equal 'baz', @dalli.options[:frob]
end
it 'allow mute and silence' do
@dalli = ActiveSupport::Cache.lookup_store(:dalli_store, 'localhost:19122')
@dalli.mute do
assert op_addset_succeeds(@dalli.write('foo', 'bar', nil))
assert_equal 'bar', @dalli.read('foo', nil)
end
refute @dalli.silence?
@dalli.silence!
assert_equal true, @dalli.silence?
end
it 'handle nil options' do
@dalli = ActiveSupport::Cache.lookup_store(:dalli_store, 'localhost:19122')
assert op_addset_succeeds(@dalli.write('foo', 'bar', nil))
assert_equal 'bar', @dalli.read('foo', nil)
assert_equal 18, @dalli.fetch('lkjsadlfk', nil) { 18 }
assert_equal 18, @dalli.fetch('lkjsadlfk', nil) { 18 }
assert_equal 1, @dalli.increment('lkjsa', 1, nil)
assert_equal 2, @dalli.increment('lkjsa', 1, nil)
assert_equal 1, @dalli.decrement('lkjsa', 1, nil)
assert_equal true, @dalli.delete('lkjsa')
end
it 'support fetch' do
with_activesupport do
memcached do
connect
dvalue = @dalli.fetch('someotherkeywithoutspaces', :expires_in => 1.second) { 123 }
assert_equal 123, dvalue
o = Object.new
o.instance_variable_set :@foo, 'bar'
dvalue = @dalli.fetch(rand_key, :raw => true) { o }
assert_equal o, dvalue
dvalue = @dalli.fetch(rand_key) { o }
assert_equal o, dvalue
@dalli.write('false', false)
dvalue = @dalli.fetch('false') { flunk }
assert_equal false, dvalue
user = MockUser.new
@dalli.write(user.cache_key, false)
dvalue = @dalli.fetch(user) { flunk }
assert_equal false, dvalue
end
end
end
it 'support keys with spaces on Rails3' do
with_activesupport do
memcached do
connect
dvalue = @dalli.fetch('some key with spaces', :expires_in => 1.second) { 123 }
assert_equal 123, dvalue
end
end
end
it 'support read_multi' do
with_activesupport do
memcached do
connect
x = rand_key
y = rand_key
assert_equal({}, @dalli.read_multi(x, y))
@dalli.write(x, '123')
@dalli.write(y, 123)
assert_equal({ x => '123', y => 123 }, @dalli.read_multi(x, y))
end
end
end
it 'support read_multi with an array' do
with_activesupport do
memcached do
connect
x = rand_key
y = rand_key
assert_equal({}, @dalli.read_multi([x, y]))
@dalli.write(x, '123')
@dalli.write(y, 123)
assert_equal({}, @dalli.read_multi([x, y]))
@dalli.write([x, y], '123')
assert_equal({ [x, y] => '123' }, @dalli.read_multi([x, y]))
end
end
end
it 'support raw read_multi' do
with_activesupport do
memcached do
connect
@dalli.write("abc", 5, :raw => true)
@dalli.write("cba", 5, :raw => true)
assert_equal({'abc' => '5', 'cba' => '5' }, @dalli.read_multi("abc", "cba"))
end
end
end
it 'support read_multi with LocalCache' do
with_activesupport do
memcached do
connect
x = rand_key
y = rand_key
assert_equal({}, @dalli.read_multi(x, y))
@dalli.write(x, '123')
@dalli.write(y, 456)
@dalli.with_local_cache do
assert_equal({ x => '123', y => 456 }, @dalli.read_multi(x, y))
Dalli::Client.any_instance.expects(:get).with(any_parameters).never
dres = @dalli.read(x)
assert_equal dres, '123'
end
Dalli::Client.any_instance.unstub(:get)
# Fresh LocalStore
@dalli.with_local_cache do
@dalli.read(x)
Dalli::Client.any_instance.expects(:get_multi).with([y.to_s]).returns(y.to_s => 456)
assert_equal({ x => '123', y => 456}, @dalli.read_multi(x, y))
end
end
end
end
it 'supports fetch_multi' do
with_activesupport do
memcached do
connect
x = rand_key.to_s
y = rand_key
hash = { x => 'ABC', y => 'DEF' }
@dalli.write(y, '123')
results = @dalli.fetch_multi(x, y) { |key| hash[key] }
assert_equal({ x => 'ABC', y => '123' }, results)
assert_equal('ABC', @dalli.read(x))
assert_equal('123', @dalli.read(y))
end
end
end
it 'support read, write and delete' do
with_activesupport do
memcached do
connect
y = rand_key
assert_nil @dalli.read(y)
dres = @dalli.write(y, 123)
assert op_addset_succeeds(dres)
dres = @dalli.read(y)
assert_equal 123, dres
dres = @dalli.delete(y)
assert_equal true, dres
user = MockUser.new
dres = @dalli.write(user.cache_key, "foo")
assert op_addset_succeeds(dres)
dres = @dalli.read(user)
assert_equal "foo", dres
dres = @dalli.delete(user)
assert_equal true, dres
bigkey = ''
@dalli.write(bigkey, 'double width')
assert_equal 'double width', @dalli.read(bigkey)
assert_equal({bigkey => "double width"}, @dalli.read_multi(bigkey))
end
end
end
it 'support read, write and delete with LocalCache' do
with_activesupport do
memcached do
connect
y = rand_key.to_s
@dalli.with_local_cache do
Dalli::Client.any_instance.expects(:get).with(y, {}).once.returns(123)
dres = @dalli.read(y)
assert_equal 123, dres
Dalli::Client.any_instance.expects(:get).with(y, {}).never
dres = @dalli.read(y)
assert_equal 123, dres
@dalli.write(y, 456)
dres = @dalli.read(y)
assert_equal 456, dres
@dalli.delete(y)
Dalli::Client.any_instance.expects(:get).with(y, {}).once.returns(nil)
dres = @dalli.read(y)
assert_equal nil, dres
end
end
end
end
it 'support unless_exist with LocalCache' do
with_activesupport do
memcached do
connect
y = rand_key.to_s
@dalli.with_local_cache do
Dalli::Client.any_instance.expects(:add).with(y, 123, nil, {:unless_exist => true}).once.returns(true)
dres = @dalli.write(y, 123, :unless_exist => true)
assert_equal true, dres
Dalli::Client.any_instance.expects(:add).with(y, 321, nil, {:unless_exist => true}).once.returns(false)
dres = @dalli.write(y, 321, :unless_exist => true)
assert_equal false, dres
Dalli::Client.any_instance.expects(:get).with(y, {}).once.returns(123)
dres = @dalli.read(y)
assert_equal 123, dres
end
end
end
end
it 'support increment/decrement commands' do
with_activesupport do
memcached do
connect
assert op_addset_succeeds(@dalli.write('counter', 0, :raw => true))
assert_equal 1, @dalli.increment('counter')
assert_equal 2, @dalli.increment('counter')
assert_equal 1, @dalli.decrement('counter')
assert_equal "1", @dalli.read('counter', :raw => true)
assert_equal 1, @dalli.increment('counterX')
assert_equal 2, @dalli.increment('counterX')
assert_equal 2, @dalli.read('counterX', :raw => true).to_i
assert_equal 5, @dalli.increment('counterY1', 1, :initial => 5)
assert_equal 6, @dalli.increment('counterY1', 1, :initial => 5)
assert_equal 6, @dalli.read('counterY1', :raw => true).to_i
assert_equal nil, @dalli.increment('counterZ1', 1, :initial => nil)
assert_equal nil, @dalli.read('counterZ1')
assert_equal 5, @dalli.decrement('counterY2', 1, :initial => 5)
assert_equal 4, @dalli.decrement('counterY2', 1, :initial => 5)
assert_equal 4, @dalli.read('counterY2', :raw => true).to_i
assert_equal nil, @dalli.decrement('counterZ2', 1, :initial => nil)
assert_equal nil, @dalli.read('counterZ2')
user = MockUser.new
assert op_addset_succeeds(@dalli.write(user, 0, :raw => true))
assert_equal 1, @dalli.increment(user)
assert_equal 2, @dalli.increment(user)
assert_equal 1, @dalli.decrement(user)
assert_equal "1", @dalli.read(user, :raw => true)
end
end
end
it 'support exist command' do
with_activesupport do
memcached do
connect
@dalli.write(:foo, 'a')
@dalli.write(:false_value, false)
assert_equal true, @dalli.exist?(:foo)
assert_equal true, @dalli.exist?(:false_value)
assert_equal false, @dalli.exist?(:bar)
user = MockUser.new
@dalli.write(user, 'foo')
assert_equal true, @dalli.exist?(user)
end
end
end
it 'support other esoteric commands' do
with_activesupport do
memcached do
connect
ds = @dalli.stats
assert_equal 1, ds.keys.size
assert ds[ds.keys.first].keys.size > 0
@dalli.reset
end
end
end
# With :raise_errors, every cache primitive must surface Dalli::RingError
# once no server is reachable; the default store instead degrades silently
# (read returns nil). Fixes: duplicate `delete` assertion removed, and the
# deprecated `assert_equal x, nil` form replaced with assert_nil.
it 'respect "raise_errors" option' do
  with_activesupport do
    memcached(29125) do
      @dalli = ActiveSupport::Cache.lookup_store(:dalli_store, 'localhost:29125')
      @dalli.write 'foo', 'bar'
      assert_equal @dalli.read('foo'), 'bar'
      memcached_kill(29125)
      # Default behaviour: a dead server is swallowed and read returns nil.
      assert_nil @dalli.read('foo')
      @dalli = ActiveSupport::Cache.lookup_store(:dalli_store, 'localhost:29125', :raise_errors => true)
      exception = [Dalli::RingError, { :message => "No server available" }]
      assert_raises(*exception) { @dalli.read 'foo' }
      assert_raises(*exception) { @dalli.read 'foo', :raw => true }
      assert_raises(*exception) { @dalli.write 'foo', 'bar' }
      assert_raises(*exception) { @dalli.exist? 'foo' }
      assert_raises(*exception) { @dalli.increment 'foo' }
      assert_raises(*exception) { @dalli.decrement 'foo' }
      assert_raises(*exception) { @dalli.delete 'foo' }
      # read_multi stays best-effort even with :raise_errors and returns {}.
      assert_equal @dalli.read_multi('foo', 'bar'), {}
      assert_raises(*exception) { @dalli.fetch('foo') { 42 } }
    end
  end
end
end
it 'handle crazy characters from far-away lands' do
with_activesupport do
memcached do
connect
key = "fooƒ"
value = 'bafƒ'
assert op_addset_succeeds(@dalli.write(key, value))
assert_equal value, @dalli.read(key)
end
end
end
it 'normalize options as expected' do
with_activesupport do
memcached do
@dalli = ActiveSupport::Cache::DalliStore.new('localhost:19122', :expires_in => 1, :namespace => 'foo', :compress => true)
assert_equal 1, @dalli.instance_variable_get(:@data).instance_variable_get(:@options)[:expires_in]
assert_equal 'foo', @dalli.instance_variable_get(:@data).instance_variable_get(:@options)[:namespace]
assert_equal ["localhost:19122"], @dalli.instance_variable_get(:@data).instance_variable_get(:@servers)
end
end
end
it 'handles nil server with additional options' do
with_activesupport do
memcached do
@dalli = ActiveSupport::Cache::DalliStore.new(nil, :expires_in => 1, :namespace => 'foo', :compress => true)
assert_equal 1, @dalli.instance_variable_get(:@data).instance_variable_get(:@options)[:expires_in]
assert_equal 'foo', @dalli.instance_variable_get(:@data).instance_variable_get(:@options)[:namespace]
assert_equal ["127.0.0.1:11211"], @dalli.instance_variable_get(:@data).instance_variable_get(:@servers)
end
end
end
# Exercises DalliStore behind a ConnectionPool (:pool_size) and verifies
# every primitive still works when routed through the pool. Uses assert_nil
# and assert_kind_of instead of the deprecated/indirect assert_equal forms.
it 'supports connection pooling' do
  with_activesupport do
    memcached do
      @dalli = ActiveSupport::Cache::DalliStore.new('localhost:19122', :expires_in => 1, :namespace => 'foo', :compress => true, :pool_size => 3)
      assert_nil @dalli.read('foo')
      assert @dalli.write('foo', 1)
      # Cache hit: the fetch block must not run.
      assert_equal 1, @dalli.fetch('foo') { raise 'boom' }
      assert_kind_of ConnectionPool, @dalli.dalli
      assert_equal 1, @dalli.increment('bar')
      assert_equal 0, @dalli.decrement('bar')
      assert_equal true, @dalli.delete('bar')
      assert_equal [true], @dalli.clear
      assert_equal 1, @dalli.stats.size
    end
  end
end
it 'allow keys to be frozen' do
with_activesupport do
memcached do
connect
key = "foo"
key.freeze
assert op_addset_succeeds(@dalli.write(key, "value"))
end
end
end
it 'allow keys from a hash' do
with_activesupport do
memcached do
connect
map = { "one" => "one", "two" => "two" }
map.each_pair do |k, v|
assert op_addset_succeeds(@dalli.write(k, v))
end
assert_equal map, @dalli.read_multi(*(map.keys))
end
end
end
# Builds a fresh DalliStore for the examples above and empties it so each
# test starts from a clean cache. The :namespace lambda is resolved on use;
# 33.to_s(36) evaluates to "x".
def connect
@dalli = ActiveSupport::Cache.lookup_store(:dalli_store, 'localhost:19122', :expires_in => 10.seconds, :namespace => lambda{33.to_s(36)})
@dalli.clear
end
# Produces a random integer cache key in [0, 10**9).
def rand_key
  rand(10**9)
end
end

View File

@ -0,0 +1,107 @@
require 'helper'
require 'memcached_mock'
describe 'Dalli::Cas::Client' do
describe 'using a live server' do
it 'supports get with CAS' do
memcached_cas do |dc|
dc.flush
expected = { 'blah' => 'blerg!' }
get_block_called = false
stored_value = stored_cas = nil
# Validate call-with-block
dc.get_cas('gets_key') do |v, cas|
get_block_called = true
stored_value = v
stored_cas = cas
end
assert get_block_called
assert_nil stored_value
dc.set('gets_key', expected)
# Validate call-with-return-value
stored_value, stored_cas = dc.get_cas('gets_key')
assert_equal stored_value, expected
assert(stored_cas != 0)
end
end
# Multi-get with CAS hands back a [value, cas] pair per key, both via the
# returned hash and via the block form; missing keys (c..f) are absent.
# Fix: `data.first, data.second` relied on ActiveSupport's Array#second,
# which core Ruby does not define — plain destructuring is equivalent and
# dependency-free.
it 'supports multi-get with CAS' do
  memcached_cas do |dc|
    dc.close
    dc.flush
    expected_hash = {'a' => 'foo', 'b' => 123}
    expected_hash.each_pair do |k, v|
      dc.set(k, v)
    end
    # Invocation without block
    resp = dc.get_multi_cas(%w(a b c d e f))
    resp.each_pair do |k, data|
      value, cas = data
      assert_equal expected_hash[k], value
      assert(cas && cas != 0)
    end
    # Invocation with block
    dc.get_multi_cas(%w(a b c d e f)) do |k, data|
      value, cas = data
      assert_equal expected_hash[k], value
      assert(cas && cas != 0)
    end
  end
end
it 'supports replace-with-CAS operation' do
memcached_cas do |dc|
dc.flush
cas = dc.set('key', 'value')
# Accepts CAS, replaces, and returns new CAS
cas = dc.replace_cas('key', 'value2', cas)
assert cas.is_a?(Integer)
assert_equal 'value2', dc.get('key')
end
end
# delete_cas removes the entry when handed the CAS token returned by set.
it 'supports delete with CAS' do
memcached_cas do |dc|
cas = dc.set('some_key', 'some_value')
dc.delete_cas('some_key', cas)
assert_nil dc.get('some_key')
end
end
# Optimistic-concurrency round trip: each mutating *_cas call must fail
# with a stale token (cas+1) and succeed with the current one. Note that
# successful set_cas/replace_cas return the *new* CAS, which is captured
# and reused for the next step.
it 'handles CAS round-trip operations' do
memcached_cas do |dc|
dc.flush
expected = {'blah' => 'blerg!'}
dc.set('some_key', expected)
value, cas = dc.get_cas('some_key')
assert_equal value, expected
assert(!cas.nil? && cas != 0)
# Set operation, first with wrong then with correct CAS
expected = {'blah' => 'set succeeded'}
assert(dc.set_cas('some_key', expected, cas+1) == false)
assert op_addset_succeeds(cas = dc.set_cas('some_key', expected, cas))
# Replace operation, first with wrong then with correct CAS
expected = {'blah' => 'replace succeeded'}
assert(dc.replace_cas('some_key', expected, cas+1) == false)
assert op_addset_succeeds(cas = dc.replace_cas('some_key', expected, cas))
# Delete operation, first with wrong then with correct CAS
assert(dc.delete_cas('some_key', cas+1) == false)
assert dc.delete_cas('some_key', cas)
end
end
end
end

View File

@ -0,0 +1,53 @@
# encoding: utf-8
require 'helper'
require 'json'
require 'memcached_mock'
# A pass-through compressor used to exercise Dalli's :compressor option:
# both hooks hand the payload back unmodified.
class NoopCompressor
  class << self
    # Returns +data+ unchanged.
    def compress(data)
      data
    end

    # Returns +data+ unchanged.
    def decompress(data)
      data
    end
  end
end
describe 'Compressor' do
it 'default to Dalli::Compressor' do
memcached_kill(29199) do |dc|
memcache = Dalli::Client.new('127.0.0.1:29199')
memcache.set 1,2
assert_equal Dalli::Compressor, memcache.instance_variable_get('@ring').servers.first.compressor
end
end
# A user-supplied :compressor must be wired through to every server.
# Fix: dropped the no-op `begin ... end` wrapper (it had no rescue/ensure,
# so it contributed nothing).
it 'support a custom compressor' do
  memcached_kill(29199) do |dc|
    memcache = Dalli::Client.new('127.0.0.1:29199', :compressor => NoopCompressor)
    memcache.set 1, 2
    assert_equal NoopCompressor, memcache.instance_variable_get('@ring').servers.first.compressor
    # Round-trip against a live server to prove the no-op compressor does
    # not corrupt stored values.
    memcached(19127) do |newdc|
      assert newdc.set("string-test", "a test string")
      assert_equal("a test string", newdc.get("string-test"))
    end
  end
end
end
describe 'GzipCompressor' do
it 'compress and uncompress data using Zlib::GzipWriter/Reader' do
memcached(19127,nil,{:compress=>true,:compressor=>Dalli::GzipCompressor}) do |dc|
data = (0...1025).map{65.+(rand(26)).chr}.join
assert dc.set("test", data)
assert_equal Dalli::GzipCompressor, dc.instance_variable_get('@ring').servers.first.compressor
assert_equal(data, dc.get("test"))
end
end
end

View File

@ -0,0 +1,625 @@
require 'helper'
require 'memcached_mock'
describe 'Dalli' do
describe 'options parsing' do
# The legacy :compression flag must be rewritten to :compress, and the
# deprecated key must not survive in the resulting options hash.
it 'handle deprecated options' do
dc = Dalli::Client.new('foo', :compression => true)
assert dc.instance_variable_get(:@options)[:compress]
refute dc.instance_variable_get(:@options)[:compression]
end
it 'not warn about valid options' do
dc = Dalli::Client.new('foo', :compress => true)
# Rails.logger.expects :warn
assert dc.instance_variable_get(:@options)[:compress]
end
it 'raises error with invalid expires_in' do
bad_data = [{:bad => 'expires in data'}, Hash, [1,2,3]]
bad_data.each do |bad|
assert_raises ArgumentError do
Dalli::Client.new('foo', {:expires_in => bad})
end
end
end
it 'return string type for namespace attribute' do
dc = Dalli::Client.new('foo', :namespace => :wunderschoen)
assert_equal "wunderschoen", dc.send(:namespace)
dc.close
dc = Dalli::Client.new('foo', :namespace => Proc.new{:wunderschoen})
assert_equal "wunderschoen", dc.send(:namespace)
dc.close
end
end
describe 'key validation' do
it 'not allow blanks' do
memcached do |dc|
dc.set ' ', 1
assert_equal 1, dc.get(' ')
dc.set "\t", 1
assert_equal 1, dc.get("\t")
dc.set "\n", 1
assert_equal 1, dc.get("\n")
assert_raises ArgumentError do
dc.set "", 1
end
assert_raises ArgumentError do
dc.set nil, 1
end
end
end
it 'allow namespace to be a symbol' do
memcached(19122, '', :namespace => :wunderschoen) do |dc|
dc.set "x" * 251, 1
assert 1, dc.get("#{'x' * 200}:md5:#{Digest::MD5.hexdigest('x' * 251)}")
end
end
end
it "default to localhost:11211" do
dc = Dalli::Client.new
ring = dc.send(:ring)
s1 = ring.servers.first.hostname
assert_equal 1, ring.servers.size
dc.close
dc = Dalli::Client.new('localhost:11211')
ring = dc.send(:ring)
s2 = ring.servers.first.hostname
assert_equal 1, ring.servers.size
dc.close
dc = Dalli::Client.new(['localhost:11211'])
ring = dc.send(:ring)
s3 = ring.servers.first.hostname
assert_equal 1, ring.servers.size
dc.close
assert_equal '127.0.0.1', s1
assert_equal s2, s3
end
it "accept comma separated string" do
dc = Dalli::Client.new("server1.example.com:11211,server2.example.com:11211")
ring = dc.send(:ring)
assert_equal 2, ring.servers.size
s1,s2 = ring.servers.map(&:hostname)
assert_equal "server1.example.com", s1
assert_equal "server2.example.com", s2
end
it "accept array of servers" do
dc = Dalli::Client.new(["server1.example.com:11211","server2.example.com:11211"])
ring = dc.send(:ring)
assert_equal 2, ring.servers.size
s1,s2 = ring.servers.map(&:hostname)
assert_equal "server1.example.com", s1
assert_equal "server2.example.com", s2
end
describe 'using a live server' do
it "support get/set" do
memcached do |dc|
dc.flush
val1 = "1234567890"*105000
assert_equal false, dc.set('a', val1)
val1 = "1234567890"*100000
dc.set('a', val1)
val2 = dc.get('a')
assert_equal val1, val2
assert op_addset_succeeds(dc.set('a', nil))
assert_nil dc.get('a')
end
end
it 'supports delete' do
memcached do |dc|
dc.set('some_key', 'some_value')
assert_equal 'some_value', dc.get('some_key')
dc.delete('some_key')
assert_nil dc.get('some_key')
end
end
# A cache miss must come back as nil. Uses assert_nil instead of the
# deprecated `assert_equal nil, ...` form.
it 'returns nil for nonexist key' do
  memcached do |dc|
    assert_nil dc.get('notexist')
  end
end
it 'allows "Not found" as value' do
memcached do |dc|
dc.set('key1', 'Not found')
assert_equal 'Not found', dc.get('key1')
end
end
it "support stats" do
memcached do |dc|
# make sure that get_hits would not equal 0
dc.get(:a)
stats = dc.stats
servers = stats.keys
assert(servers.any? do |s|
stats[s]["get_hits"].to_i != 0
end, "general stats failed")
stats_items = dc.stats(:items)
servers = stats_items.keys
assert(servers.all? do |s|
stats_items[s].keys.any? do |key|
key =~ /items:[0-9]+:number/
end
end, "stats items failed")
stats_slabs = dc.stats(:slabs)
servers = stats_slabs.keys
assert(servers.all? do |s|
stats_slabs[s].keys.any? do |key|
key == "active_slabs"
end
end, "stats slabs failed")
# reset_stats test
results = dc.reset_stats
assert(results.all? { |x| x })
stats = dc.stats
servers = stats.keys
# check if reset was performed
servers.each do |s|
assert_equal 0, dc.stats[s]["get_hits"].to_i
end
end
end
it "support the fetch operation" do
memcached do |dc|
dc.flush
expected = { 'blah' => 'blerg!' }
executed = false
value = dc.fetch('fetch_key') do
executed = true
expected
end
assert_equal expected, value
assert_equal true, executed
executed = false
value = dc.fetch('fetch_key') do
executed = true
expected
end
assert_equal expected, value
assert_equal false, executed
end
end
it "support the fetch operation with falsey values" do
memcached do |dc|
dc.flush
dc.set("fetch_key", false)
res = dc.fetch("fetch_key") { flunk "fetch block called" }
assert_equal false, res
dc.set("fetch_key", nil)
res = dc.fetch("fetch_key") { "bob" }
assert_equal 'bob', res
end
end
it "support the cas operation" do
memcached do |dc|
dc.flush
expected = { 'blah' => 'blerg!' }
resp = dc.cas('cas_key') do |value|
fail('Value it not exist')
end
assert_nil resp
mutated = { 'blah' => 'foo!' }
dc.set('cas_key', expected)
resp = dc.cas('cas_key') do |value|
assert_equal expected, value
mutated
end
assert op_cas_succeeds(resp)
resp = dc.get('cas_key')
assert_equal mutated, resp
end
end
it "support multi-get" do
memcached do |dc|
dc.close
dc.flush
resp = dc.get_multi(%w(a b c d e f))
assert_equal({}, resp)
dc.set('a', 'foo')
dc.set('b', 123)
dc.set('c', %w(a b c))
# Invocation without block
resp = dc.get_multi(%w(a b c d e f))
expected_resp = { 'a' => 'foo', 'b' => 123, 'c' => %w(a b c) }
assert_equal(expected_resp, resp)
# Invocation with block
dc.get_multi(%w(a b c d e f)) do |k, v|
assert(expected_resp.has_key?(k) && expected_resp[k] == v)
expected_resp.delete(k)
end
assert expected_resp.empty?
# Perform a big multi-get with 1000 elements.
arr = []
dc.multi do
1000.times do |idx|
dc.set idx, idx
arr << idx
end
end
result = dc.get_multi(arr)
assert_equal(1000, result.size)
assert_equal(50, result['50'])
end
end
it 'support raw incr/decr' do
memcached do |client|
client.flush
assert op_addset_succeeds(client.set('fakecounter', 0, 0, :raw => true))
assert_equal 1, client.incr('fakecounter', 1)
assert_equal 2, client.incr('fakecounter', 1)
assert_equal 3, client.incr('fakecounter', 1)
assert_equal 1, client.decr('fakecounter', 2)
assert_equal "1", client.get('fakecounter', :raw => true)
resp = client.incr('mycounter', 0)
assert_nil resp
resp = client.incr('mycounter', 1, 0, 2)
assert_equal 2, resp
resp = client.incr('mycounter', 1)
assert_equal 3, resp
resp = client.set('rawcounter', 10, 0, :raw => true)
assert op_cas_succeeds(resp)
resp = client.get('rawcounter', :raw => true)
assert_equal '10', resp
resp = client.incr('rawcounter', 1)
assert_equal 11, resp
end
end
it "support incr/decr operations" do
memcached do |dc|
dc.flush
resp = dc.decr('counter', 100, 5, 0)
assert_equal 0, resp
resp = dc.decr('counter', 10)
assert_equal 0, resp
resp = dc.incr('counter', 10)
assert_equal 10, resp
current = 10
100.times do |x|
resp = dc.incr('counter', 10)
assert_equal current + ((x+1)*10), resp
end
resp = dc.decr('10billion', 0, 5, 10)
# go over the 32-bit mark to verify proper (un)packing
resp = dc.incr('10billion', 10_000_000_000)
assert_equal 10_000_000_010, resp
resp = dc.decr('10billion', 1)
assert_equal 10_000_000_009, resp
resp = dc.decr('10billion', 0)
assert_equal 10_000_000_009, resp
resp = dc.incr('10billion', 0)
assert_equal 10_000_000_009, resp
assert_nil dc.incr('DNE', 10)
assert_nil dc.decr('DNE', 10)
resp = dc.incr('big', 100, 5, 0xFFFFFFFFFFFFFFFE)
assert_equal 0xFFFFFFFFFFFFFFFE, resp
resp = dc.incr('big', 1)
assert_equal 0xFFFFFFFFFFFFFFFF, resp
# rollover the 64-bit value, we'll get something undefined.
resp = dc.incr('big', 1)
refute_equal 0x10000000000000000, resp
dc.reset
end
end
it 'support the append and prepend operations' do
memcached do |dc|
dc.flush
assert op_addset_succeeds(dc.set('456', 'xyz', 0, :raw => true))
assert_equal true, dc.prepend('456', '0')
assert_equal true, dc.append('456', '9')
assert_equal '0xyz9', dc.get('456', :raw => true)
assert_equal '0xyz9', dc.get('456')
assert_equal false, dc.append('nonexist', 'abc')
assert_equal false, dc.prepend('nonexist', 'abc')
end
end
it 'supports replace operation' do
memcached do |dc|
dc.flush
dc.set('key', 'value')
assert op_replace_succeeds(dc.replace('key', 'value2'))
assert_equal 'value2', dc.get('key')
end
end
it 'support touch operation' do
memcached do |dc|
begin
dc.flush
dc.set 'key', 'value'
assert_equal true, dc.touch('key', 10)
assert_equal true, dc.touch('key')
assert_equal 'value', dc.get('key')
assert_nil dc.touch('notexist')
rescue Dalli::DalliError => e
# This will happen when memcached is in lesser version than 1.4.8
assert_equal 'Response error 129: Unknown command', e.message
end
end
end
it 'support version operation' do
memcached do |dc|
v = dc.version
servers = v.keys
assert(servers.any? do |s|
v[s] != nil
end, "version failed")
end
end
it 'allow TCP connections to be configured for keepalive' do
memcached(19122, '', :keepalive => true) do |dc|
dc.set(:a, 1)
# Reach into the server's raw socket to inspect the option actually set.
ring = dc.send(:ring)
server = ring.servers.first
socket = server.instance_variable_get('@sock')
optval = socket.getsockopt(Socket::SOL_SOCKET, Socket::SO_KEEPALIVE)
# getsockopt returns packed bytes; 'i' unpacks one native int whose
# non-zero value means SO_KEEPALIVE is enabled on the socket.
optval = optval.unpack 'i'
assert_equal true, (optval[0] != 0)
end
end
it "pass a simple smoke test" do
memcached do |dc|
resp = dc.flush
refute_nil resp
assert_equal [true, true], resp
assert op_addset_succeeds(dc.set(:foo, 'bar'))
assert_equal 'bar', dc.get(:foo)
resp = dc.get('123')
assert_equal nil, resp
assert op_addset_succeeds(dc.set('123', 'xyz'))
resp = dc.get('123')
assert_equal 'xyz', resp
assert op_addset_succeeds(dc.set('123', 'abc'))
dc.prepend('123', '0')
dc.append('123', '0')
assert_raises Dalli::UnmarshalError do
resp = dc.get('123')
end
dc.close
dc = nil
dc = Dalli::Client.new('localhost:19122')
assert op_addset_succeeds(dc.set('456', 'xyz', 0, :raw => true))
resp = dc.prepend '456', '0'
assert_equal true, resp
resp = dc.append '456', '9'
assert_equal true, resp
resp = dc.get('456', :raw => true)
assert_equal '0xyz9', resp
assert op_addset_succeeds(dc.set('456', false))
resp = dc.get('456')
assert_equal false, resp
resp = dc.stats
assert_equal Hash, resp.class
dc.close
end
end
it "support multithreaded access" do
memcached do |cache|
cache.flush
workers = []
cache.set('f', 'zzz')
assert op_cas_succeeds((cache.cas('f') do |value|
value << 'z'
end))
assert_equal 'zzzz', cache.get('f')
# Have a bunch of threads perform a bunch of operations at the same time.
# Verify the result of each operation to ensure the request and response
# are not intermingled between threads.
10.times do
workers << Thread.new do
100.times do
cache.set('a', 9)
cache.set('b', 11)
inc = cache.incr('cat', 10, 0, 10)
cache.set('f', 'zzz')
res = cache.cas('f') do |value|
value << 'z'
end
refute_nil res
assert_equal false, cache.add('a', 11)
assert_equal({ 'a' => 9, 'b' => 11 }, cache.get_multi(['a', 'b']))
inc = cache.incr('cat', 10)
assert_equal 0, inc % 5
cache.decr('cat', 5)
assert_equal 11, cache.get('b')
assert_equal %w(a b), cache.get_multi('a', 'b', 'c').keys.sort
end
end
end
workers.each { |w| w.join }
cache.flush
end
end
it "handle namespaced keys" do
memcached do |dc|
dc = Dalli::Client.new('localhost:19122', :namespace => 'a')
dc.set('namespaced', 1)
dc2 = Dalli::Client.new('localhost:19122', :namespace => 'b')
dc2.set('namespaced', 2)
assert_equal 1, dc.get('namespaced')
assert_equal 2, dc2.get('namespaced')
end
end
it "handle nil namespace" do
memcached do |dc|
dc = Dalli::Client.new('localhost:19122', :namespace => nil)
assert_equal 'key', dc.send(:validate_key, 'key')
end
end
it 'truncate cache keys that are too long' do
memcached do
dc = Dalli::Client.new('localhost:19122', :namespace => 'some:namspace')
key = "this cache key is far too long so it must be hashed and truncated and stuff" * 10
value = "some value"
assert op_addset_succeeds(dc.set(key, value))
assert_equal value, dc.get(key)
end
end
it "handle namespaced keys in multi_get" do
memcached do |dc|
dc = Dalli::Client.new('localhost:19122', :namespace => 'a')
dc.set('a', 1)
dc.set('b', 2)
assert_equal({'a' => 1, 'b' => 2}, dc.get_multi('a', 'b'))
end
end
it "handle application marshalling issues" do
memcached do |dc|
old = Dalli.logger
Dalli.logger = Logger.new(nil)
begin
assert_equal false, dc.set('a', Proc.new { true })
ensure
Dalli.logger = old
end
end
end
describe 'with compression' do
it 'allow large values' do
memcached do |dc|
dalli = Dalli::Client.new(dc.instance_variable_get(:@servers), :compress => true)
value = "0"*1024*1024
assert_equal false, dc.set('verylarge', value)
dalli.set('verylarge', value)
end
end
end
describe 'in low memory conditions' do
it 'handle error response correctly' do
memcached(19125, '-m 1 -M') do |dc|
failed = false
value = "1234567890"*100
1_000.times do |idx|
begin
assert op_addset_succeeds(dc.set(idx, value))
rescue Dalli::DalliError
failed = true
assert((800..960).include?(idx), "unexpected failure on iteration #{idx}")
break
end
end
assert failed, 'did not fail under low memory conditions'
end
end
it 'fit more values with compression' do
memcached(19126, '-m 1 -M') do |dc|
dalli = Dalli::Client.new('localhost:19126', :compress => true)
failed = false
value = "1234567890"*1000
10_000.times do |idx|
begin
assert op_addset_succeeds(dalli.set(idx, value))
rescue Dalli::DalliError
failed = true
assert((6000..7800).include?(idx), "unexpected failure on iteration #{idx}")
break
end
end
assert failed, 'did not fail under low memory conditions'
end
end
end
end
end

View File

@ -0,0 +1,32 @@
# encoding: utf-8
require 'helper'
require 'memcached_mock'
describe 'Encoding' do
describe 'using a live server' do
it 'support i18n content' do
memcached do |dc|
key = 'foo'
utf_key = utf8 = 'ƒ©åÍÎ'
assert dc.set(key, utf8)
assert_equal utf8, dc.get(key)
dc.set(utf_key, utf8)
assert_equal utf8, dc.get(utf_key)
end
end
# A value stored with a 1-second TTL must be readable immediately and gone
# once the TTL has elapsed. Uses assert_nil instead of the deprecated
# `assert_equal nil, ...` form.
it 'support content expiry' do
  memcached do |dc|
    key = 'foo'
    assert dc.set(key, 'bar', 1)
    assert_equal 'bar', dc.get(key)
    sleep 1.2 # just past the 1-second TTL
    assert_nil dc.get(key)
  end
end
end
end

View File

@ -0,0 +1,128 @@
require 'helper'
describe 'failover' do
describe 'timeouts' do
it 'not lead to corrupt sockets' do
memcached(29125) do
dc = Dalli::Client.new ['localhost:29125']
begin
Timeout.timeout 0.01 do
1_000.times do
dc.set("test_123", {:test => "123"})
end
flunk("Did not timeout")
end
rescue Timeout::Error
end
assert_equal({:test => '123'}, dc.get("test_123"))
end
end
end
describe 'assuming some bad servers' do
it 'silently reconnect if server hiccups' do
memcached(29125) do
dc = Dalli::Client.new ['localhost:29125']
dc.set 'foo', 'bar'
foo = dc.get 'foo'
assert_equal foo, 'bar'
memcached_kill(29125)
memcached(29125) do
foo = dc.get 'foo'
assert_nil foo
memcached_kill(29125)
end
end
end
it 'handle graceful failover' do
memcached(29125) do
memcached(29126) do
dc = Dalli::Client.new ['localhost:29125', 'localhost:29126']
dc.set 'foo', 'bar'
foo = dc.get 'foo'
assert_equal foo, 'bar'
memcached_kill(29125)
dc.set 'foo', 'bar'
foo = dc.get 'foo'
assert_equal foo, 'bar'
memcached_kill(29126)
assert_raises Dalli::RingError, :message => "No server available" do
dc.set 'foo', 'bar'
end
end
end
end
it 'handle them gracefully in get_multi' do
memcached(29125) do
memcached(29126) do
dc = Dalli::Client.new ['localhost:29125', 'localhost:29126']
dc.set 'a', 'a1'
result = dc.get_multi ['a']
assert_equal result, {'a' => 'a1'}
memcached_kill(29125)
result = dc.get_multi ['a']
assert_equal result, {'a' => 'a1'}
end
end
end
it 'handle graceful failover in get_multi' do
memcached(29125) do
memcached(29126) do
dc = Dalli::Client.new ['localhost:29125', 'localhost:29126']
dc.set 'foo', 'foo1'
dc.set 'bar', 'bar1'
result = dc.get_multi ['foo', 'bar']
assert_equal result, {'foo' => 'foo1', 'bar' => 'bar1'}
memcached_kill(29125)
dc.set 'foo', 'foo1'
dc.set 'bar', 'bar1'
result = dc.get_multi ['foo', 'bar']
assert_equal result, {'foo' => 'foo1', 'bar' => 'bar1'}
memcached_kill(29126)
result = dc.get_multi ['foo', 'bar']
assert_equal result, {}
end
end
end
it 'stats it still properly report' do
memcached(29125) do
memcached(29126) do
dc = Dalli::Client.new ['localhost:29125', 'localhost:29126']
result = dc.stats
assert_instance_of Hash, result['localhost:29125']
assert_instance_of Hash, result['localhost:29126']
memcached_kill(29125)
dc = Dalli::Client.new ['localhost:29125', 'localhost:29126']
result = dc.stats
assert_instance_of NilClass, result['localhost:29125']
assert_instance_of Hash, result['localhost:29126']
memcached_kill(29126)
end
end
end
end
end

View File

@ -0,0 +1,54 @@
require 'helper'
describe 'Network' do
describe 'assuming a bad network' do
it 'handle no server available' do
assert_raises Dalli::RingError, :message => "No server available" do
dc = Dalli::Client.new 'localhost:19333'
dc.get 'foo'
end
end
describe 'with a fake server' do
it 'handle connection reset' do
memcached_mock(lambda {|sock| sock.close }) do
assert_raises Dalli::RingError, :message => "No server available" do
dc = Dalli::Client.new('localhost:19123')
dc.get('abc')
end
end
end
it 'handle malformed response' do
memcached_mock(lambda {|sock| sock.write('123') }) do
assert_raises Dalli::RingError, :message => "No server available" do
dc = Dalli::Client.new('localhost:19123')
dc.get('abc')
end
end
end
it 'handle connect timeouts' do
memcached_mock(lambda {|sock| sleep(0.6); sock.close }, :delayed_start) do
assert_raises Dalli::RingError, :message => "No server available" do
dc = Dalli::Client.new('localhost:19123')
dc.get('abc')
end
end
end
it 'handle read timeouts' do
memcached_mock(lambda {|sock| sleep(0.6); sock.write('giraffe') }) do
assert_raises Dalli::RingError, :message => "No server available" do
dc = Dalli::Client.new('localhost:19123')
dc.get('abc')
end
end
end
end
end
end

View File

@ -0,0 +1,341 @@
require 'helper'
require 'rack/session/dalli'
require 'rack/lint'
require 'rack/mock'
require 'thread'
describe Rack::Session::Dalli do
Rack::Session::Dalli::DEFAULT_OPTIONS[:memcache_server] = 'localhost:19129'
before do
# NOTE(review): the memcached helper is handed an empty block, so whether
# a server on 19129 is still running afterwards depends on the helper's
# teardown semantics — confirm it does not stop the server when the block
# returns, since the middleware below connects to it.
memcached(19129) do
end
# test memcache connection
Rack::Session::Dalli.new(incrementor)
end
let(:session_key) { Rack::Session::Dalli::DEFAULT_OPTIONS[:key] }
let(:session_match) do
/#{session_key}=([0-9a-fA-F]+);/
end
let(:incrementor_proc) do
lambda do |env|
env["rack.session"]["counter"] ||= 0
env["rack.session"]["counter"] += 1
Rack::Response.new(env["rack.session"].inspect).to_a
end
end
let(:drop_session) do
Rack::Lint.new(proc do |env|
env['rack.session.options'][:drop] = true
incrementor_proc.call(env)
end)
end
let(:renew_session) do
Rack::Lint.new(proc do |env|
env['rack.session.options'][:renew] = true
incrementor_proc.call(env)
end)
end
let(:defer_session) do
Rack::Lint.new(proc do |env|
env['rack.session.options'][:defer] = true
incrementor_proc.call(env)
end)
end
let(:skip_session) do
Rack::Lint.new(proc do |env|
env['rack.session.options'][:skip] = true
incrementor_proc.call(env)
end)
end
let(:incrementor) { Rack::Lint.new(incrementor_proc) }
it "faults on no connection" do
assert_raises Dalli::RingError do
Rack::Session::Dalli.new(incrementor, :memcache_server => 'nosuchserver')
end
end
it "connects to existing server" do
assert_silent do
rsd = Rack::Session::Dalli.new(incrementor, :namespace => 'test:rack:session')
rsd.pool.set('ping', '')
end
end
it "passes options to MemCache" do
rsd = Rack::Session::Dalli.new(incrementor, :namespace => 'test:rack:session')
assert_equal('test:rack:session', rsd.pool.instance_eval { @options[:namespace] })
end
it "creates a new cookie" do
rsd = Rack::Session::Dalli.new(incrementor)
res = Rack::MockRequest.new(rsd).get("/")
assert res["Set-Cookie"].include?("#{session_key}=")
assert_equal '{"counter"=>1}', res.body
end
it "determines session from a cookie" do
rsd = Rack::Session::Dalli.new(incrementor)
req = Rack::MockRequest.new(rsd)
res = req.get("/")
cookie = res["Set-Cookie"]
assert_equal '{"counter"=>2}', req.get("/", "HTTP_COOKIE" => cookie).body
assert_equal '{"counter"=>3}', req.get("/", "HTTP_COOKIE" => cookie).body
end
it "determines session only from a cookie by default" do
rsd = Rack::Session::Dalli.new(incrementor)
req = Rack::MockRequest.new(rsd)
res = req.get("/")
sid = res["Set-Cookie"][session_match, 1]
assert_equal '{"counter"=>1}', req.get("/?rack.session=#{sid}").body
assert_equal '{"counter"=>1}', req.get("/?rack.session=#{sid}").body
end
it "determines session from params" do
rsd = Rack::Session::Dalli.new(incrementor, :cookie_only => false)
req = Rack::MockRequest.new(rsd)
res = req.get("/")
sid = res["Set-Cookie"][session_match, 1]
assert_equal '{"counter"=>2}', req.get("/?rack.session=#{sid}").body
assert_equal '{"counter"=>3}', req.get("/?rack.session=#{sid}").body
end
it "survives nonexistant cookies" do
bad_cookie = "rack.session=blarghfasel"
rsd = Rack::Session::Dalli.new(incrementor)
res = Rack::MockRequest.new(rsd).
get("/", "HTTP_COOKIE" => bad_cookie)
assert_equal '{"counter"=>1}', res.body
cookie = res["Set-Cookie"][session_match]
refute_match(/#{bad_cookie}/, cookie)
end
it "survives nonexistant blank cookies" do
bad_cookie = "rack.session="
rsd = Rack::Session::Dalli.new(incrementor)
res = Rack::MockRequest.new(rsd).
get("/", "HTTP_COOKIE" => bad_cookie)
cookie = res["Set-Cookie"][session_match]
refute_match(/#{bad_cookie}$/, cookie)
end
it "maintains freshness" do
rsd = Rack::Session::Dalli.new(incrementor, :expire_after => 3)
res = Rack::MockRequest.new(rsd).get('/')
assert res.body.include?('"counter"=>1')
cookie = res["Set-Cookie"]
res = Rack::MockRequest.new(rsd).get('/', "HTTP_COOKIE" => cookie)
assert_equal cookie, res["Set-Cookie"]
assert res.body.include?('"counter"=>2')
puts 'Sleeping to expire session' if $DEBUG
sleep 4
res = Rack::MockRequest.new(rsd).get('/', "HTTP_COOKIE" => cookie)
refute_equal cookie, res["Set-Cookie"]
assert res.body.include?('"counter"=>1')
end
it "does not send the same session id if it did not change" do
rsd = Rack::Session::Dalli.new(incrementor)
req = Rack::MockRequest.new(rsd)
res0 = req.get("/")
cookie = res0["Set-Cookie"][session_match]
assert_equal '{"counter"=>1}', res0.body
res1 = req.get("/", "HTTP_COOKIE" => cookie)
assert_nil res1["Set-Cookie"]
assert_equal '{"counter"=>2}', res1.body
res2 = req.get("/", "HTTP_COOKIE" => cookie)
assert_nil res2["Set-Cookie"]
assert_equal '{"counter"=>3}', res2.body
end
it "deletes cookies with :drop option" do
rsd = Rack::Session::Dalli.new(incrementor)
req = Rack::MockRequest.new(rsd)
drop = Rack::Utils::Context.new(rsd, drop_session)
dreq = Rack::MockRequest.new(drop)
res1 = req.get("/")
session = (cookie = res1["Set-Cookie"])[session_match]
assert_equal '{"counter"=>1}', res1.body
res2 = dreq.get("/", "HTTP_COOKIE" => cookie)
assert_nil res2["Set-Cookie"]
assert_equal '{"counter"=>2}', res2.body
res3 = req.get("/", "HTTP_COOKIE" => cookie)
refute_equal session, res3["Set-Cookie"][session_match]
assert_equal '{"counter"=>1}', res3.body
end
it "provides new session id with :renew option" do
rsd = Rack::Session::Dalli.new(incrementor)
req = Rack::MockRequest.new(rsd)
renew = Rack::Utils::Context.new(rsd, renew_session)
rreq = Rack::MockRequest.new(renew)
res1 = req.get("/")
session = (cookie = res1["Set-Cookie"])[session_match]
assert_equal '{"counter"=>1}', res1.body
res2 = rreq.get("/", "HTTP_COOKIE" => cookie)
new_cookie = res2["Set-Cookie"]
new_session = new_cookie[session_match]
refute_equal session, new_session
assert_equal '{"counter"=>2}', res2.body
res3 = req.get("/", "HTTP_COOKIE" => new_cookie)
assert_equal '{"counter"=>3}', res3.body
# Old cookie was deleted
res4 = req.get("/", "HTTP_COOKIE" => cookie)
assert_equal '{"counter"=>1}', res4.body
end
it "omits cookie with :defer option but still updates the state" do
rsd = Rack::Session::Dalli.new(incrementor)
count = Rack::Utils::Context.new(rsd, incrementor)
defer = Rack::Utils::Context.new(rsd, defer_session)
dreq = Rack::MockRequest.new(defer)
creq = Rack::MockRequest.new(count)
res0 = dreq.get("/")
assert_nil res0["Set-Cookie"]
assert_equal '{"counter"=>1}', res0.body
res0 = creq.get("/")
res1 = dreq.get("/", "HTTP_COOKIE" => res0["Set-Cookie"])
assert_equal '{"counter"=>2}', res1.body
res2 = dreq.get("/", "HTTP_COOKIE" => res0["Set-Cookie"])
assert_equal '{"counter"=>3}', res2.body
end
it "omits cookie and state update with :skip option" do
rsd = Rack::Session::Dalli.new(incrementor)
count = Rack::Utils::Context.new(rsd, incrementor)
skip = Rack::Utils::Context.new(rsd, skip_session)
sreq = Rack::MockRequest.new(skip)
creq = Rack::MockRequest.new(count)
res0 = sreq.get("/")
assert_nil res0["Set-Cookie"]
assert_equal '{"counter"=>1}', res0.body
res0 = creq.get("/")
res1 = sreq.get("/", "HTTP_COOKIE" => res0["Set-Cookie"])
assert_equal '{"counter"=>2}', res1.body
res2 = sreq.get("/", "HTTP_COOKIE" => res0["Set-Cookie"])
assert_equal '{"counter"=>2}', res2.body
end
it "updates deep hashes correctly" do
hash_check = proc do |env|
session = env['rack.session']
unless session.include? 'test'
session.update :a => :b, :c => { :d => :e },
:f => { :g => { :h => :i} }, 'test' => true
else
session[:f][:g][:h] = :j
end
[200, {}, [session.inspect]]
end
rsd = Rack::Session::Dalli.new(hash_check)
req = Rack::MockRequest.new(rsd)
res0 = req.get("/")
session_id = (cookie = res0["Set-Cookie"])[session_match, 1]
ses0 = rsd.pool.get(session_id, true)
req.get("/", "HTTP_COOKIE" => cookie)
ses1 = rsd.pool.get(session_id, true)
refute_equal ses0, ses1
end
# anyone know how to do this better?
it "cleanly merges sessions when multithreaded" do
unless $DEBUG
assert_equal 1, 1 # fake assertion to appease the mighty bacon
next
end
warn 'Running multithread test for Session::Dalli'
rsd = Rack::Session::Dalli.new(incrementor)
req = Rack::MockRequest.new(rsd)
res = req.get('/')
assert_equal '{"counter"=>1}', res.body
cookie = res["Set-Cookie"]
session_id = cookie[session_match, 1]
delta_incrementor = lambda do |env|
# emulate disconjoinment of threading
env['rack.session'] = env['rack.session'].dup
Thread.stop
env['rack.session'][(Time.now.usec*rand).to_i] = true
incrementor.call(env)
end
tses = Rack::Utils::Context.new rsd, delta_incrementor
treq = Rack::MockRequest.new(tses)
tnum = rand(7).to_i+5
r = Array.new(tnum) do
Thread.new(treq) do |run|
run.get('/', "HTTP_COOKIE" => cookie, 'rack.multithread' => true)
end
end.reverse.map{|t| t.run.join.value }
r.each do |request|
assert_equal cookie, request['Set-Cookie']
assert request.body.include?('"counter"=>2')
end
session = rsd.pool.get(session_id)
assert_equal tnum+1, session.size # counter
assert_equal 2, session['counter'] # meeeh
tnum = rand(7).to_i+5
r = Array.new(tnum) do |i|
app = Rack::Utils::Context.new rsd, time_delta
req = Rack::MockRequest.new app
Thread.new(req) do |run|
run.get('/', "HTTP_COOKIE" => cookie, 'rack.multithread' => true)
end
end.reverse.map{|t| t.run.join.value }
r.each do |request|
assert_equal cookie, request['Set-Cookie']
assert request.body.include?('"counter"=>3')
end
session = rsd.pool.get(session_id)
assert_equal tnum+1, session.size
assert_equal 3, session['counter']
drop_counter = proc do |env|
env['rack.session'].delete 'counter'
env['rack.session']['foo'] = 'bar'
[200, {'Content-Type'=>'text/plain'}, env['rack.session'].inspect]
end
tses = Rack::Utils::Context.new rsd, drop_counter
treq = Rack::MockRequest.new(tses)
tnum = rand(7).to_i+5
r = Array.new(tnum) do
Thread.new(treq) do |run|
run.get('/', "HTTP_COOKIE" => cookie, 'rack.multithread' => true)
end
end.reverse.map{|t| t.run.join.value }
r.each do |request|
assert_equal cookie, request['Set-Cookie']
assert request.body.include?('"foo"=>"bar"')
end
session = rsd.pool.get(session_id)
assert_equal r.size+1, session.size
assert_nil session['counter']
assert_equal 'bar', session['foo']
end
end

View File

@ -0,0 +1,85 @@
require 'helper'
# Specs for Dalli::Ring: consistent-hashing continuum ordering and
# dead/alive server selection.
describe 'Ring' do
  describe 'a ring of servers' do
    it "have the continuum sorted by value" do
      # Two weighted servers; the hash continuum must be strictly
      # ascending so binary search over it is valid.
      servers = [stub(:hostname => "localhost", :port => "11211", :weight => 1),
                 stub(:hostname => "localhost", :port => "9500", :weight => 1)]
      ring = Dalli::Ring.new(servers, {})
      previous_value = 0
      ring.continuum.each do |entry|
        assert entry.value > previous_value
        previous_value = entry.value
      end
    end

    it 'raise when no servers are available/defined' do
      ring = Dalli::Ring.new([], {})
      assert_raises Dalli::RingError, :message => "No server available" do
        ring.server_for_key('test')
      end
    end

    describe 'containing only a single server' do
      it "raise correctly when it's not alive" do
        # Nothing listens on 12345, so key lookup must fail fast.
        servers = [
          Dalli::Server.new("localhost:12345"),
        ]
        ring = Dalli::Ring.new(servers, {})
        assert_raises Dalli::RingError, :message => "No server available" do
          ring.server_for_key('test')
        end
      end

      it "return the server when it's alive" do
        # Use the client's own ring. The original also constructed a
        # throwaway servers array and Ring here that were immediately
        # shadowed by the reassignment below — dead code, removed.
        memcached(19191) do |mc|
          ring = mc.send(:ring)
          assert_equal ring.servers.first.port, ring.server_for_key('test').port
        end
      end
    end

    describe 'containing multiple servers' do
      it "raise correctly when no server is alive" do
        servers = [
          Dalli::Server.new("localhost:12345"),
          Dalli::Server.new("localhost:12346"),
        ]
        ring = Dalli::Ring.new(servers, {})
        assert_raises Dalli::RingError, :message => "No server available" do
          ring.server_for_key('test')
        end
      end

      it "return an alive server when at least one is alive" do
        # As above: the throwaway Ring the original built before
        # reassigning from the client was dead code and is removed.
        memcached(19191) do |mc|
          ring = mc.send(:ring)
          assert_equal ring.servers.first.port, ring.server_for_key('test').port
        end
      end
    end

    it 'detect when a dead server is up again' do
      memcached(19997) do
        down_retry_delay = 0.5
        dc = Dalli::Client.new(['localhost:19997', 'localhost:19998'], :down_retry_delay => down_retry_delay)
        # Only the running server responds to stats at first...
        assert_equal 1, dc.stats.values.compact.count

        # ...then the second server comes up and should be re-detected.
        memcached(19998) do
          assert_equal 2, dc.stats.values.compact.count
        end
      end
    end
  end
end

View File

@ -0,0 +1,110 @@
require 'helper'
# Specs for SASL authentication support in Dalli (memcached started
# with -S). Several specs are disabled via the no-op `xit` helper
# because they require a locally configured SASL user.
describe 'Sasl' do
  # Minitest spec has no built-in `xit`; define a no-op so disabled
  # specs are skipped entirely.
  # See https://github.com/seattlerb/minitest/issues/298
  def self.xit(msg, &block)
  end

  describe 'a server requiring authentication' do
    before do
      # Stubbed server used by the URI-parsing specs below; no real
      # network traffic is performed against it.
      @server = mock()
      @server.stubs(:request).returns(true)
      @server.stubs(:weight).returns(1)
      @server.stubs(:hostname).returns("localhost")
      @server.stubs(:port).returns("19124")
    end

    describe 'without authentication credentials' do
      before do
        # Deliberately wrong password so authentication fails.
        ENV['MEMCACHE_USERNAME'] = 'foo'
        ENV['MEMCACHE_PASSWORD'] = 'wrongpwd'
      end

      after do
        ENV['MEMCACHE_USERNAME'] = nil
        ENV['MEMCACHE_PASSWORD'] = nil
      end

      it 'provide one test that passes' do
        assert true
      end

      it 'gracefully handle authentication failures' do
        memcached(19124, '-S') do |dc|
          # /32/ matches memcached's authentication-failure status code
          # surfaced in the DalliError message.
          assert_error Dalli::DalliError, /32/ do
            dc.set('abc', 123)
          end
        end
      end
    end

    it 'fail SASL authentication with wrong options' do
      memcached(19124, '-S') do |dc|
        # Wrong credentials passed explicitly as options rather than ENV.
        dc = Dalli::Client.new('localhost:19124', :username => 'foo', :password => 'wrongpwd')
        assert_error Dalli::DalliError, /32/ do
          dc.set('abc', 123)
        end
      end
    end

    # OSX: Create a SASL user for the memcached application like so:
    #
    # saslpasswd2 -a memcached -c testuser
    #
    # with password 'testtest'
    describe 'in an authenticated environment' do
      before do
        ENV['MEMCACHE_USERNAME'] = 'testuser'
        ENV['MEMCACHE_PASSWORD'] = 'testtest'
      end

      after do
        ENV['MEMCACHE_USERNAME'] = nil
        ENV['MEMCACHE_PASSWORD'] = nil
      end

      xit 'pass SASL authentication' do
        memcached(19124, '-S') do |dc|
          # I get "Dalli::DalliError: Error authenticating: 32" in OSX
          # but SASL works on Heroku servers. YMMV.
          assert_equal true, dc.set('abc', 123)
          assert_equal 123, dc.get('abc')
          results = dc.stats
          assert_equal 1, results.size
          assert_equal 38, results.values.first.size
        end
      end
    end

    # NOTE(review): this spec sits OUTSIDE the 'in an authenticated
    # environment' block above, so its before/after ENV setup does not
    # apply here — presumably it was meant to be nested inside; verify
    # before re-enabling.
    xit 'pass SASL authentication with options' do
      memcached(19124, '-S') do |dc|
        dc = Dalli::Client.new('localhost:19124', :username => 'testuser', :password => 'testtest')
        # I get "Dalli::DalliError: Error authenticating: 32" in OSX
        # but SASL works on Heroku servers. YMMV.
        assert_equal true, dc.set('abc', 123)
        assert_equal 123, dc.get('abc')
        results = dc.stats
        assert_equal 1, results.size
        assert_equal 38, results.values.first.size
      end
    end

    it 'pass SASL as URI' do
      # Credentials embedded in a memcached:// URI must be parsed out and
      # passed as options to Dalli::Server.
      Dalli::Server.expects(:new).with("localhost:19124",
        :username => "testuser", :password => "testtest").returns(@server)
      dc = Dalli::Client.new('memcached://testuser:testtest@localhost:19124')
      dc.flush_all
    end

    it 'pass SASL as ring of URIs' do
      # Each URI in the list carries its own credentials.
      Dalli::Server.expects(:new).with("localhost:19124",
        :username => "testuser", :password => "testtest").returns(@server)
      Dalli::Server.expects(:new).with("otherhost:19125",
        :username => "testuser2", :password => "testtest2").returns(@server)
      dc = Dalli::Client.new(['memcached://testuser:testtest@localhost:19124',
        'memcached://testuser2:testtest2@otherhost:19125'])
      dc.flush_all
    end
  end
end

View File

@ -0,0 +1,30 @@
# encoding: utf-8
require 'helper'
require 'json'
require 'memcached_mock'
# Specs for pluggable value serialization on the Dalli client/server.
describe 'Serializer' do
  it 'default to Marshal' do
    memcached_kill(29198) do |_dc|
      memcache = Dalli::Client.new('127.0.0.1:29198')
      memcache.set 1, 2
      # No :serializer option given, so the server must fall back to Marshal.
      assert_equal Marshal, memcache.instance_variable_get('@ring').servers.first.serializer
    end
  end

  it 'support a custom serializer' do
    memcached_kill(29198) do |_dc|
      memcache = Dalli::Client.new('127.0.0.1:29198', :serializer => JSON)
      memcache.set 1, 2
      # The original wrapped the rest of this spec in a bare `begin ... end`
      # with no rescue/ensure clause — a pointless vestigial wrapper, removed.
      assert_equal JSON, memcache.instance_variable_get('@ring').servers.first.serializer
      # Round-trip a hash through a live server to prove JSON is actually used.
      memcached(19128) do |newdc|
        assert newdc.set("json_test", {"foo" => "bar"})
        assert_equal({"foo" => "bar"}, newdc.get("json_test"))
      end
    end
  end
end

View File

@ -0,0 +1,80 @@
require 'helper'
# Specs for Dalli::Server: address-string parsing (host / port / weight,
# IPv4, IPv6, FQDN) and TTL sanitization.
describe Dalli::Server do
  describe 'hostname parsing' do
    it 'handles no port or weight' do
      server = Dalli::Server.new('localhost')
      assert_equal 'localhost', server.hostname
      assert_equal 11211, server.port
      assert_equal 1, server.weight
    end

    it 'handles a port, but no weight' do
      server = Dalli::Server.new('localhost:11212')
      assert_equal 'localhost', server.hostname
      assert_equal 11212, server.port
      assert_equal 1, server.weight
    end

    it 'handles a port and a weight' do
      server = Dalli::Server.new('localhost:11212:2')
      assert_equal 'localhost', server.hostname
      assert_equal 11212, server.port
      assert_equal 2, server.weight
    end

    it 'handles ipv4 addresses' do
      server = Dalli::Server.new('127.0.0.1')
      assert_equal '127.0.0.1', server.hostname
      assert_equal 11211, server.port
      assert_equal 1, server.weight
    end

    it 'handles ipv6 addresses' do
      # Bracketed form; the brackets are stripped from the hostname.
      server = Dalli::Server.new('[::1]')
      assert_equal '::1', server.hostname
      assert_equal 11211, server.port
      assert_equal 1, server.weight
    end

    it 'handles ipv6 addresses with port' do
      server = Dalli::Server.new('[::1]:11212')
      assert_equal '::1', server.hostname
      assert_equal 11212, server.port
      assert_equal 1, server.weight
    end

    it 'handles ipv6 addresses with port and weight' do
      server = Dalli::Server.new('[::1]:11212:2')
      assert_equal '::1', server.hostname
      assert_equal 11212, server.port
      assert_equal 2, server.weight
    end

    it 'handles a FQDN' do
      server = Dalli::Server.new('my.fqdn.com')
      assert_equal 'my.fqdn.com', server.hostname
      assert_equal 11211, server.port
      assert_equal 1, server.weight
    end

    it 'handles a FQDN with port and weight' do
      server = Dalli::Server.new('my.fqdn.com:11212:2')
      assert_equal 'my.fqdn.com', server.hostname
      assert_equal 11212, server.port
      assert_equal 2, server.weight
    end
  end

  describe 'ttl translation' do
    it 'does not translate ttls under 30 days' do
      # Exactly 30 days is the memcached cutoff; it must pass through.
      thirty_days = 30 * 24 * 60 * 60
      server = Dalli::Server.new('localhost')
      assert_equal server.send(:sanitize_ttl, thirty_days), thirty_days
    end

    it 'translates ttls over 30 days into timestamps' do
      # Beyond 30 days memcached interprets the value as an absolute
      # unix timestamp, so the TTL is converted to now + ttl.
      thirty_days = 30 * 24 * 60 * 60
      server = Dalli::Server.new('localhost')
      assert_equal server.send(:sanitize_ttl, thirty_days + 1), Time.now.to_i + thirty_days + 1
    end
  end
end

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.3 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.0 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 958 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.4 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.3 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 169 KiB