/*
 *  Licensed to the Apache Software Foundation (ASF) under one
 *  or more contributor license agreements.  See the NOTICE file
 *  distributed with this work for additional information
 *  regarding copyright ownership.  The ASF licenses this file
 *  to you under the Apache License, Version 2.0 (the
 *  "License"); you may not use this file except in compliance
 *  with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing,
 *  software distributed under the License is distributed on an
 *  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 *  KIND, either express or implied.  See the License for the
 *  specific language governing permissions and limitations
 *  under the License.
 *
 */
package org.apache.mina.core.buffer;

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.HashMap;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

/**
 * An {@link IoBufferAllocator} that caches the buffers which are likely to
 * be reused during auto-expansion of the buffers.
 * <p>
 * In {@link SimpleBufferAllocator}, the underlying {@link ByteBuffer} of an
 * {@link IoBuffer} is reallocated whenever its capacity changes; the newly
 * allocated, bigger {@link ByteBuffer} replaces the old, smaller one, and the
 * old {@link ByteBuffer} becomes eligible for garbage collection.
 * <p>
 * This is not a problem in most cases, as long as capacity changes are
 * infrequent.  However, when they happen too often, they burden the VM, and
 * the cost of filling each newly allocated {@link ByteBuffer} with
 * {@code NUL} surpasses the cost of accessing the cache.  On a machine with
 * two dual-core Opteron Italy 270 processors, {@link CachedBufferAllocator}
 * outperformed {@link SimpleBufferAllocator} in the following situations:
 * <ul>
 * <li>when a 32-byte buffer is expanded 4 or more times,</li>
 * <li>when a 64-byte buffer is expanded 4 or more times,</li>
 * <li>when a 128-byte buffer is expanded 2 or more times,</li>
 * <li>and when a 256-byte or bigger buffer is expanded 1 or more times.</li>
 * </ul>
 * Please note that the observation above is subject to change in a different
 * environment.
 * <p>
 * {@link CachedBufferAllocator} uses a {@link ThreadLocal} to store the
 * cached buffers, allocates only buffers whose capacity is a power of 2, and
 * provides a performance advantage only if {@link IoBuffer#free()} is called
 * properly.
 *
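 * <p>
 * A minimal usage sketch (assuming the allocator is installed as the default
 * allocator through the static {@code IoBuffer.setAllocator(...)} method):
 * <pre>
 * // Install the cached allocator; subsequent IoBuffer allocations use it.
 * IoBuffer.setAllocator(new CachedBufferAllocator());
 *
 * IoBuffer buf = IoBuffer.allocate(1024);
 * // ... use the buffer ...
 * buf.free();  // returns the underlying ByteBuffer to the thread-local cache
 * </pre>
 *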
 * @author <a href="http://mina.apache.org">Apache MINA Project</a>
 */
public class CachedBufferAllocator implements IoBufferAllocator {

    private static final int DEFAULT_MAX_POOL_SIZE = 8;
    private static final int DEFAULT_MAX_CACHED_BUFFER_SIZE = 1 << 18; // 256KB

    private final int maxPoolSize;
    private final int maxCachedBufferSize;

    private final ThreadLocal<Map<Integer, Queue<CachedBuffer>>> heapBuffers;
    private final ThreadLocal<Map<Integer, Queue<CachedBuffer>>> directBuffers;

    /**
     * Creates a new instance with the default parameters
     * ({@literal #DEFAULT_MAX_POOL_SIZE} and {@literal #DEFAULT_MAX_CACHED_BUFFER_SIZE}).
     */
    public CachedBufferAllocator() {
        this(DEFAULT_MAX_POOL_SIZE, DEFAULT_MAX_CACHED_BUFFER_SIZE);
    }

    /**
     * Creates a new instance.
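     * <p>
     * An illustrative sketch (the values shown here are arbitrary, not
     * recommendations): the following creates an allocator that keeps at most
     * 16 buffers per capacity per thread and does not cache buffers bigger
     * than 64 KB:
     * <pre>
     * IoBufferAllocator allocator = new CachedBufferAllocator(16, 65536);
     * </pre>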
     *
     * @param maxPoolSize the maximum number of buffers with the same capacity per thread.
     *                    <tt>0</tt> disables this limitation.
     * @param maxCachedBufferSize the maximum capacity of a cached buffer.
     *                            A buffer whose capacity is bigger than this value is
     *                            not pooled. <tt>0</tt> disables this limitation.
     */
    public CachedBufferAllocator(int maxPoolSize, int maxCachedBufferSize) {
        if (maxPoolSize < 0) {
            throw new IllegalArgumentException("maxPoolSize: " + maxPoolSize);
        }

        if (maxCachedBufferSize < 0) {
            throw new IllegalArgumentException("maxCachedBufferSize: " + maxCachedBufferSize);
        }

        this.maxPoolSize = maxPoolSize;
        this.maxCachedBufferSize = maxCachedBufferSize;

        this.heapBuffers = new ThreadLocal<Map<Integer, Queue<CachedBuffer>>>() {
            @Override
            protected Map<Integer, Queue<CachedBuffer>> initialValue() {
                return newPoolMap();
            }
        };

        this.directBuffers = new ThreadLocal<Map<Integer, Queue<CachedBuffer>>>() {
            @Override
            protected Map<Integer, Queue<CachedBuffer>> initialValue() {
                return newPoolMap();
            }
        };
    }

    /**
     * Returns the maximum number of buffers with the same capacity per thread.
     * <tt>0</tt> means 'no limitation'.
     */
    public int getMaxPoolSize() {
        return maxPoolSize;
    }

    /**
     * Returns the maximum capacity of a cached buffer.  A buffer whose
     * capacity is bigger than this value is not pooled.  <tt>0</tt> means
     * 'no limitation'.
     */
    public int getMaxCachedBufferSize() {
        return maxCachedBufferSize;
    }

    Map<Integer, Queue<CachedBuffer>> newPoolMap() {
        Map<Integer, Queue<CachedBuffer>> poolMap =
            new HashMap<Integer, Queue<CachedBuffer>>();

        // Create one pool per power-of-two capacity (1, 2, 4, ..., 2^30),
        // plus pools for the two normalized capacities that are not powers
        // of two: 0 and Integer.MAX_VALUE.
        for (int i = 0; i < 31; i++) {
            poolMap.put(1 << i, new ConcurrentLinkedQueue<CachedBuffer>());
        }

        poolMap.put(0, new ConcurrentLinkedQueue<CachedBuffer>());
        poolMap.put(Integer.MAX_VALUE, new ConcurrentLinkedQueue<CachedBuffer>());

        return poolMap;
    }

    public IoBuffer allocate(int requestedCapacity, boolean direct) {
        int actualCapacity = IoBuffer.normalizeCapacity(requestedCapacity);
        IoBuffer buf;

        if ((maxCachedBufferSize != 0) && (actualCapacity > maxCachedBufferSize)) {
            // The buffer is too big to be cached; allocate a fresh one.
            if (direct) {
                buf = wrap(ByteBuffer.allocateDirect(actualCapacity));
            } else {
                buf = wrap(ByteBuffer.allocate(actualCapacity));
            }
        } else {
            Queue<CachedBuffer> pool;

            if (direct) {
                pool = directBuffers.get().get(actualCapacity);
            } else {
                pool = heapBuffers.get().get(actualCapacity);
            }

            // Recycle if possible.
            buf = pool.poll();

            if (buf != null) {
                // Reset the recycled buffer to a pristine state.
                buf.clear();
                buf.setAutoExpand(false);
                buf.order(ByteOrder.BIG_ENDIAN);
            } else {
                if (direct) {
                    buf = wrap(ByteBuffer.allocateDirect(actualCapacity));
                } else {
                    buf = wrap(ByteBuffer.allocate(actualCapacity));
                }
            }
        }

        buf.limit(requestedCapacity);
        return buf;
    }

    public ByteBuffer allocateNioBuffer(int capacity, boolean direct) {
        return allocate(capacity, direct).buf();
    }

    public IoBuffer wrap(ByteBuffer nioBuffer) {
        return new CachedBuffer(nioBuffer);
    }

    public void dispose() {
        // Do nothing
    }

    private class CachedBuffer extends AbstractIoBuffer {
        // The thread that created this buffer; only this thread may return
        // the buffer to its thread-local pool when it is freed.
        private final Thread ownerThread;
        private ByteBuffer buf;

        protected CachedBuffer(ByteBuffer buf) {
            super(CachedBufferAllocator.this, buf.capacity());
            this.ownerThread = Thread.currentThread();
            this.buf = buf;
            buf.order(ByteOrder.BIG_ENDIAN);
        }

        protected CachedBuffer(CachedBuffer parent, ByteBuffer buf) {
            super(parent);
            this.ownerThread = Thread.currentThread();
            this.buf = buf;
        }

        @Override
        public ByteBuffer buf() {
            if (buf == null) {
                throw new IllegalStateException("Buffer has been freed already.");
            }
            return buf;
        }

        @Override
        protected void buf(ByteBuffer buf) {
            ByteBuffer oldBuf = this.buf;
            this.buf = buf;
            free(oldBuf);
        }

        @Override
        protected IoBuffer duplicate0() {
            return new CachedBuffer(this, buf().duplicate());
        }

        @Override
        protected IoBuffer slice0() {
            return new CachedBuffer(this, buf().slice());
        }

        @Override
        protected IoBuffer asReadOnlyBuffer0() {
            return new CachedBuffer(this, buf().asReadOnlyBuffer());
        }

        @Override
        public byte[] array() {
            return buf().array();
        }

        @Override
        public int arrayOffset() {
            return buf().arrayOffset();
        }

        @Override
        public boolean hasArray() {
            return buf().hasArray();
        }

        @Override
        public void free() {
            free(buf);
            buf = null;
        }

        private void free(ByteBuffer oldBuf) {
            // Do not cache the buffer if it is null, too big, read-only, a
            // derived (duplicate/slice/read-only) view, or owned by another thread.
            if ((oldBuf == null) ||
                ((maxCachedBufferSize != 0) && (oldBuf.capacity() > maxCachedBufferSize)) ||
                oldBuf.isReadOnly() ||
                isDerived() ||
                (Thread.currentThread() != ownerThread)) {
                return;
            }

            // Add to the cache.
            Queue<CachedBuffer> pool;

            if (oldBuf.isDirect()) {
                pool = directBuffers.get().get(oldBuf.capacity());
            } else {
                pool = heapBuffers.get().get(oldBuf.capacity());
            }

            if (pool == null) {
                return;
            }

            // Restrict the size of the pool to prevent OOM.
            if ((maxPoolSize == 0) || (pool.size() < maxPoolSize)) {
                pool.offer(new CachedBuffer(oldBuf));
            }
        }
    }
}