Source Code Analysis of Ehcache Clustering over RMI
Ehcache provides not only basic in-memory caching, but also several mechanisms for replicating the local in-memory cache to other servers running Ehcache, so that the nodes form a cluster. The sections below walk through the three pieces of the RMI-based implementation: peer discovery (the peer provider), the RMI listener that exposes the local caches (the peer listener), and the cache event listener that actually performs the replication.
1 Peer provider
1.1 Automatic discovery configuration
<cacheManagerPeerProviderFactory
class="net.sf.ehcache.distribution.RMICacheManagerPeerProviderFactory"
properties="peerDiscovery=automatic,
multicastGroupAddress=230.0.0.1,
multicastGroupPort=4446, timeToLive=32"/>
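To see the effect of this configuration, a small check like the following can be run on one node once at least one other node with the same configuration is up. This is only a sketch: the ehcache.xml path and the cache name sampleCache11 are placeholders, while getCacheManagerPeerProvider("RMI") and listRemoteCachePeers() are the same calls the replicators use (see section 3.2).

import java.util.List;
import net.sf.ehcache.CacheManager;
import net.sf.ehcache.Ehcache;
import net.sf.ehcache.distribution.CacheManagerPeerProvider;

public class PeerDiscoveryCheck {
    public static void main(String[] args) throws Exception {
        // Assumes an ehcache.xml containing the automatic-discovery
        // cacheManagerPeerProviderFactory shown above, plus a cache named sampleCache11.
        CacheManager cacheManager = CacheManager.newInstance("ehcache.xml");
        Ehcache cache = cacheManager.getEhcache("sampleCache11");

        // "RMI" is the scheme key under which the RMI peer provider is registered.
        CacheManagerPeerProvider provider = cacheManager.getCacheManagerPeerProvider("RMI");

        // Give the multicast heartbeats a few seconds to arrive (interval is an assumption).
        Thread.sleep(5000);
        List remoteCachePeers = provider.listRemoteCachePeers(cache);
        System.out.println("Discovered " + remoteCachePeers.size() + " remote peer(s)");

        cacheManager.shutdown();
    }
}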
1.2 Manual discovery configuration
With manual discovery each node lists its peers explicitly. In a two-node cluster the first snippet below belongs on server1 (it points at server2), and the second belongs on server2 (it points at server1):
<cacheManagerPeerProviderFactory
class="net.sf.ehcache.distribution.RMICacheManagerPeerProviderFactory"
properties="peerDiscovery=manual,rmiUrls=//server2:40001/sampleCache11|//server2:40001/sampleCache12"/>
<cacheManagerPeerProviderFactory
class="net.sf.ehcache.distribution.RMICacheManagerPeerProviderFactory"
properties="peerDiscovery=manual,rmiUrls=//server1:40001/sampleCache11|//server1:40001/sampleCache12"/>
1.3 Source code analysis: RMICacheManagerPeerProviderFactory
The factory reads the peerDiscovery property and builds either the multicast-based provider or the manually configured one:
public CacheManagerPeerProvider createCachePeerProvider(CacheManager cacheManager, Properties properties)
        throws CacheException {
    String peerDiscovery = PropertyUtil.extractAndLogProperty(PEER_DISCOVERY, properties);
    if (peerDiscovery == null || peerDiscovery.equalsIgnoreCase(AUTOMATIC_PEER_DISCOVERY)) {
        try {
            return createAutomaticallyConfiguredCachePeerProvider(cacheManager, properties);
        } catch (IOException e) {
            throw new CacheException("Could not create CacheManagerPeerProvider. Initial cause was "
                    + e.getMessage(), e);
        }
    } else if (peerDiscovery.equalsIgnoreCase(MANUALLY_CONFIGURED_PEER_DISCOVERY)) {
        return createManuallyConfiguredCachePeerProvider(properties);
    } else {
        return null;
    }
}

protected CacheManagerPeerProvider createManuallyConfiguredCachePeerProvider(Properties properties) {
    String rmiUrls = PropertyUtil.extractAndLogProperty(RMI_URLS, properties);
    if (rmiUrls == null || rmiUrls.length() == 0) {
        LOG.info("Starting manual peer provider with empty list of peers. "
                + "No replication will occur unless peers are added.");
        rmiUrls = new String();
    }
    rmiUrls = rmiUrls.trim();
    StringTokenizer stringTokenizer = new StringTokenizer(rmiUrls, PayloadUtil.URL_DELIMITER);
    RMICacheManagerPeerProvider rmiPeerProvider = new ManualRMICacheManagerPeerProvider();
    while (stringTokenizer.hasMoreTokens()) {
        String rmiUrl = stringTokenizer.nextToken();
        rmiUrl = rmiUrl.trim();
        rmiPeerProvider.registerPeer(rmiUrl);
        LOG.debug("Registering peer {}", rmiUrl);
    }
    return rmiPeerProvider;
}
In the manual case, ManualRMICacheManagerPeerProvider keeps the registered URLs in a map keyed by RMI URL and resolves each matching one lazily through Naming.lookup() whenever a replicator asks for the remote peers of a cache:

public final synchronized void registerPeer(String rmiUrl) {
    peerUrls.put(rmiUrl, new Date());
}

public final synchronized List listRemoteCachePeers(Ehcache cache) throws CacheException {
    List remoteCachePeers = new ArrayList();
    List staleList = new ArrayList();
    for (Iterator iterator = peerUrls.keySet().iterator(); iterator.hasNext();) {
        String rmiUrl = (String) iterator.next();
        String rmiUrlCacheName = extractCacheName(rmiUrl);
        if (!rmiUrlCacheName.equals(cache.getName())) {
            continue;
        }
        Date date = (Date) peerUrls.get(rmiUrl);
        if (!stale(date)) {
            CachePeer cachePeer = null;
            try {
                cachePeer = lookupRemoteCachePeer(rmiUrl);
                remoteCachePeers.add(cachePeer);
            } catch (Exception e) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Looking up rmiUrl " + rmiUrl + " through exception " + e.getMessage()
                            + ". This may be normal if a node has gone offline. Or it may indicate network connectivity"
                            + " difficulties", e);
                }
            }
        } else {
            LOG.debug("rmiUrl {} should never be stale for a manually configured cluster.", rmiUrl);
            staleList.add(rmiUrl);
        }
    }
    //Remove any stale remote peers. Must be done here to avoid concurrent modification exception.
    for (int i = 0; i < staleList.size(); i++) {
        String rmiUrl = (String) staleList.get(i);
        peerUrls.remove(rmiUrl);
    }
    return remoteCachePeers;
}

public CachePeer lookupRemoteCachePeer(String url) throws MalformedURLException, NotBoundException, RemoteException {
    LOG.debug("Lookup URL {}", url);
    CachePeer cachePeer = (CachePeer) Naming.lookup(url);
    return cachePeer;
}
For automatic discovery, MulticastRMICacheManagerPeerProvider instead wires up a heartbeat receiver and sender on the configured multicast group; peers learn about each other from the heartbeat payloads:

public MulticastRMICacheManagerPeerProvider(CacheManager cacheManager, InetAddress groupMulticastAddress,
        Integer groupMulticastPort, Integer timeToLive, InetAddress hostAddress) {
    super(cacheManager);
    heartBeatReceiver = new MulticastKeepaliveHeartbeatReceiver(this, groupMulticastAddress,
            groupMulticastPort, hostAddress);
    heartBeatSender = new MulticastKeepaliveHeartbeatSender(cacheManager, groupMulticastAddress,
            groupMulticastPort, timeToLive, hostAddress);
}
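MulticastKeepaliveHeartbeatSender and MulticastKeepaliveHeartbeatReceiver are not shown here, but conceptually they do little more than the stripped-down sketch below using plain java.net.MulticastSocket (the real classes compress and chunk the URL payload and run on schedules):

import java.net.DatagramPacket;
import java.net.InetAddress;
import java.net.MulticastSocket;
import java.nio.charset.StandardCharsets;

// Simplified sketch of the multicast heartbeat idea, not the actual Ehcache classes:
// each node periodically announces its RMI URLs to the group, and every node listens
// on the same group/port to learn about peers.
public class HeartbeatSketch {
    public static void main(String[] args) throws Exception {
        InetAddress group = InetAddress.getByName("230.0.0.1");
        int port = 4446;

        // Receiver side (roughly what MulticastKeepaliveHeartbeatReceiver does).
        MulticastSocket receiver = new MulticastSocket(port);
        receiver.joinGroup(group);
        new Thread(() -> {
            byte[] buf = new byte[1500];
            DatagramPacket packet = new DatagramPacket(buf, buf.length);
            try {
                receiver.receive(packet);
                String payload = new String(packet.getData(), 0, packet.getLength(), StandardCharsets.UTF_8);
                System.out.println("Heartbeat received: " + payload);
            } catch (Exception e) {
                e.printStackTrace();
            }
        }).start();

        // Sender side (roughly what MulticastKeepaliveHeartbeatSender does):
        // announce this node's cache peer URLs to the group.
        MulticastSocket sender = new MulticastSocket();
        sender.setTimeToLive(32);
        byte[] data = "//server1:40001/sampleCache11".getBytes(StandardCharsets.UTF_8);
        sender.send(new DatagramPacket(data, data.length, group, port));
    }
}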
2 Peer listener
2.1 Configuration
The listener exposes the local caches over RMI on the given port; this port has to match the port used in the peers' rmiUrls (40001 in the manual configuration above):
<cacheManagerPeerListenerFactory
class="net.sf.ehcache.distribution.RMICacheManagerPeerListenerFactory"
properties="hostName=localhost,
port=40001,
socketTimeoutMillis=2000"/>
2.2 Source code analysis: RMICacheManagerPeerListenerFactory
The factory reads hostName, port, remoteObjectPort and socketTimeoutMillis, applies defaults where a property is missing, and creates the RMI listener:

public final CacheManagerPeerListener createCachePeerListener(CacheManager cacheManager, Properties properties)
        throws CacheException {
    String hostName = PropertyUtil.extractAndLogProperty(HOSTNAME, properties);
    String portString = PropertyUtil.extractAndLogProperty(PORT, properties);
    Integer port = null;
    if (portString != null && portString.length() != 0) {
        port = Integer.valueOf(portString);
    } else {
        port = Integer.valueOf(0);
    }
    //0 means any port in UnicastRemoteObject, so it is ok if not specified to make it 0
    String remoteObjectPortString = PropertyUtil.extractAndLogProperty(REMOTE_OBJECT_PORT, properties);
    Integer remoteObjectPort = null;
    if (remoteObjectPortString != null && remoteObjectPortString.length() != 0) {
        remoteObjectPort = Integer.valueOf(remoteObjectPortString);
    } else {
        remoteObjectPort = Integer.valueOf(0);
    }
    String socketTimeoutMillisString = PropertyUtil.extractAndLogProperty(SOCKET_TIMEOUT_MILLIS, properties);
    Integer socketTimeoutMillis;
    if (socketTimeoutMillisString == null || socketTimeoutMillisString.length() == 0) {
        socketTimeoutMillis = DEFAULT_SOCKET_TIMEOUT_MILLIS;
    } else {
        socketTimeoutMillis = Integer.valueOf(socketTimeoutMillisString);
    }
    return doCreateCachePeerListener(hostName, port, remoteObjectPort, cacheManager, socketTimeoutMillis);
}

protected CacheManagerPeerListener doCreateCachePeerListener(String hostName, Integer port,
        Integer remoteObjectPort, CacheManager cacheManager, Integer socketTimeoutMillis) {
    try {
        return new RMICacheManagerPeerListener(hostName, port, remoteObjectPort, cacheManager, socketTimeoutMillis);
    } catch (UnknownHostException e) {
        throw new CacheException("Unable to create CacheManagerPeerListener. Initial cause was "
                + e.getMessage(), e);
    }
}
RMICacheManagerPeerListener.init() starts (or reuses) an RMI registry on the configured port and binds one RMICachePeer per replicated cache, so that remote providers can look them up:

public void init() throws CacheException {
    if (!status.equals(Status.STATUS_UNINITIALISED)) {
        return;
    }
    RMICachePeer rmiCachePeer = null;
    try {
        startRegistry();
        int counter = 0;
        populateListOfRemoteCachePeers();
        synchronized (cachePeers) {
            for (Iterator iterator = cachePeers.values().iterator(); iterator.hasNext();) {
                rmiCachePeer = (RMICachePeer) iterator.next();
                bind(rmiCachePeer.getUrl(), rmiCachePeer);
                counter++;
            }
        }
        LOG.debug(counter + " RMICachePeers bound in registry for RMI listener");
        status = Status.STATUS_ALIVE;
    } catch (Exception e) {
        String url = null;
        if (rmiCachePeer != null) {
            url = rmiCachePeer.getUrl();
        }
        throw new CacheException("Problem starting listener for RMICachePeer " + url
                + ". Initial cause was " + e.getMessage(), e);
    }
}

protected void startRegistry() throws RemoteException {
    try {
        registry = LocateRegistry.getRegistry(port.intValue());
        try {
            registry.list();
        } catch (RemoteException e) {
            //may not be created. Let's create it.
            registry = LocateRegistry.createRegistry(port.intValue());
            registryCreated = true;
        }
    } catch (ExportException exception) {
        LOG.error("Exception starting RMI registry. Error was " + exception.getMessage(), exception);
    }
}

protected void populateListOfRemoteCachePeers() throws RemoteException {
    String[] names = cacheManager.getCacheNames();
    for (int i = 0; i < names.length; i++) {
        String name = names[i];
        Ehcache cache = cacheManager.getEhcache(name);
        synchronized (cachePeers) {
            if (cachePeers.get(name) == null) {
                if (isDistributed(cache)) {
                    RMICachePeer peer = new RMICachePeer(cache, hostName, port, remoteObjectPort,
                            socketTimeoutMillis);
                    cachePeers.put(name, peer);
                }
            }
        }
    }
}
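Stripped of the Ehcache specifics, the listener/provider pair boils down to standard RMI registry usage: the listener creates a registry and binds one remote object per cache, and a manual provider on another node resolves the same URL with Naming.lookup(). The sketch below uses a hypothetical DemoPeer interface in place of the real CachePeer:

import java.rmi.Naming;
import java.rmi.Remote;
import java.rmi.RemoteException;
import java.rmi.registry.LocateRegistry;
import java.rmi.server.UnicastRemoteObject;

public class RmiBindLookupDemo {

    // Hypothetical stand-in for Ehcache's CachePeer remote interface.
    public interface DemoPeer extends Remote {
        String ping() throws RemoteException;
    }

    static class DemoPeerImpl implements DemoPeer {
        public String ping() {
            return "pong";
        }
    }

    public static void main(String[] args) throws Exception {
        // Listener side: start a registry on the configured port and bind one remote
        // object per cache, which is essentially what init()/startRegistry()/bind() do.
        LocateRegistry.createRegistry(40001);
        DemoPeer stub = (DemoPeer) UnicastRemoteObject.exportObject(new DemoPeerImpl(), 0);
        Naming.rebind("//localhost:40001/sampleCache11", stub);

        // Provider side: a manually configured peer provider resolves the same URL
        // with Naming.lookup(), as in ManualRMICacheManagerPeerProvider above.
        DemoPeer remote = (DemoPeer) Naming.lookup("//localhost:40001/sampleCache11");
        System.out.println(remote.ping());
    }
}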
3 Cache event listener
3.1 Configuration
<!-- Sample cache named sampleCache2. -->
<cache name="sampleCache2"
       maxEntriesLocalHeap="10"
       eternal="false"
       timeToIdleSeconds="100"
       timeToLiveSeconds="100"
       overflowToDisk="false">
    <cacheEventListenerFactory
        class="net.sf.ehcache.distribution.RMICacheReplicatorFactory"
        properties="replicateAsynchronously=true,
                    replicatePuts=true, replicateUpdates=true,
                    replicateUpdatesViaCopy=false, replicateRemovals=true"/>
</cache>
<!-- Sample cache named sampleCache4. All missing RMICacheReplicatorFactory properties
default to true -->
<cache name="sampleCache4"
maxEntriesLocalHeap="10"
eternal="true"
overflowToDisk="false"
memoryStoreEvictionPolicy="LFU">
<cacheEventListenerFactory
class="net.sf.ehcache.distribution.RMICacheReplicatorFactory"/>
</cache>
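With a replicator configured on every node, replication is transparent to application code: an ordinary put is what eventually triggers notifyElementPut() on the replicator (analysed in 3.2 below). A minimal usage sketch, assuming the ehcache.xml above is deployed on two hosts:

import net.sf.ehcache.Cache;
import net.sf.ehcache.CacheManager;
import net.sf.ehcache.Element;

public class ReplicationDemo {
    public static void main(String[] args) throws Exception {
        // Node A: this put fires notifyElementPut on the configured RMICacheReplicator,
        // which propagates it to the peers returned by the peer provider.
        CacheManager cacheManager = CacheManager.newInstance("ehcache.xml");
        Cache cache = cacheManager.getCache("sampleCache4");
        cache.put(new Element("user:1", "Alice"));

        // Node B (same ehcache.xml deployed on another host): after the asynchronous
        // replication interval the element should be visible there as well, e.g.:
        // Element element = cache.get("user:1");
        // System.out.println(element == null ? null : element.getObjectValue());
    }
}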
3.2 Source code analysis
RMICacheReplicatorFactory extracts the replicate* properties and returns either an asynchronous or a synchronous replicator:

public final CacheEventListener createCacheEventListener(Properties properties) {
    boolean replicatePuts = extractReplicatePuts(properties);
    boolean replicatePutsViaCopy = extractReplicatePutsViaCopy(properties);
    boolean replicateUpdates = extractReplicateUpdates(properties);
    boolean replicateUpdatesViaCopy = extractReplicateUpdatesViaCopy(properties);
    boolean replicateRemovals = extractReplicateRemovals(properties);
    boolean replicateAsynchronously = extractReplicateAsynchronously(properties);
    int asynchronousReplicationIntervalMillis = extractReplicationIntervalMilis(properties);
    if (replicateAsynchronously) {
        return new RMIAsynchronousCacheReplicator(
                replicatePuts,
                replicatePutsViaCopy,
                replicateUpdates,
                replicateUpdatesViaCopy,
                replicateRemovals,
                asynchronousReplicationIntervalMillis);
    } else {
        return new RMISynchronousCacheReplicator(
                replicatePuts,
                replicatePutsViaCopy,
                replicateUpdates,
                replicateUpdatesViaCopy,
                replicateRemovals);
    }
}
RMISynchronousCacheReplicator pushes every event to all known peers on the calling thread. The replicatePutsViaCopy flag decides whether a put is propagated as a copy of the element or as a remote invalidation (a remove):

/**
 * Whether a put should replicated by copy or by invalidation, (a remove).
 * <p/>
 * By copy is best when the entry is expensive to produce. By invalidation is best when
 * we are really trying to force other caches to sync back to a canonical source like a database.
 * An example of a latter usage would be a read/write cache being used in Hibernate.
 * <p/>
 * This setting only has effect if <code>#replicateUpdates</code> is true.
 */
protected boolean replicatePutsViaCopy;

public void notifyElementPut(final Ehcache cache, final Element element) throws CacheException {
    if (notAlive()) {
        return;
    }
    if (!replicatePuts) {
        return;
    }
    if (!element.isSerializable()) {
        if (LOG.isWarnEnabled()) {
            LOG.warn("Object with key " + element.getObjectKey()
                    + " is not Serializable and cannot be replicated");
        }
        return;
    }
    if (replicatePutsViaCopy) {
        replicatePutNotification(cache, element);
    } else {
        replicateRemovalNotification(cache, (Serializable) element.getObjectKey());
    }
}

protected static void replicatePutNotification(Ehcache cache, Element element) throws RemoteCacheException {
    List cachePeers = listRemoteCachePeers(cache);
    for (Object cachePeer1 : cachePeers) {
        CachePeer cachePeer = (CachePeer) cachePeer1;
        try {
            cachePeer.put(element);
        } catch (Throwable t) {
            LOG.error("Exception on replication of putNotification. " + t.getMessage() + ". Continuing...", t);
        }
    }
}

static List listRemoteCachePeers(Ehcache cache) {
    CacheManagerPeerProvider provider = cache.getCacheManager().getCacheManagerPeerProvider("RMI");
    return provider.listRemoteCachePeers(cache);
}
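The isSerializable() guard above is worth noting: entries whose value cannot be serialized are kept local and only logged as a warning. A small sketch, assuming Element's (Object, Object) constructor and a hypothetical non-serializable SessionHandle value type:

import net.sf.ehcache.Element;

// Hypothetical value type that is NOT Serializable; such entries are skipped by the replicator.
class SessionHandle {
    final Object nativeResource = new Object();
}

public class SerializableCheckDemo {
    public static void main(String[] args) {
        Element replicable = new Element("user:42", "plain string value");
        Element notReplicable = new Element("session:42", new SessionHandle());

        // isSerializable() is the same check RMISynchronousCacheReplicator performs
        // before replicating a put; false means the element stays local only.
        System.out.println(replicable.isSerializable());     // expected: true
        System.out.println(notReplicable.isSerializable());  // expected: false
    }
}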
RMIAsynchronousCacheReplicator does not call the peers directly. notifyElementPut() only enqueues a CacheEventMessage; a daemon replication thread wakes up every asynchronousReplicationInterval milliseconds, drains the queue, and sends the batched messages to every peer:

public final void notifyElementPut(final Ehcache cache, final Element element) throws CacheException {
    if (notAlive()) {
        return;
    }
    if (!replicatePuts) {
        return;
    }
    if (replicatePutsViaCopy) {
        if (!element.isSerializable()) {
            if (LOG.isWarnEnabled()) {
                LOG.warn("Object with key " + element.getObjectKey()
                        + " is not Serializable and cannot be replicated.");
            }
            return;
        }
        addToReplicationQueue(new CacheEventMessage(EventMessage.PUT, cache, element, null));
    } else {
        if (!element.isKeySerializable()) {
            if (LOG.isWarnEnabled()) {
                LOG.warn("Object with key " + element.getObjectKey()
                        + " does not have a Serializable key and cannot be replicated via invalidate.");
            }
            return;
        }
        addToReplicationQueue(new CacheEventMessage(EventMessage.REMOVE, cache, null, element.getKey()));
    }
}

protected void addToReplicationQueue(CacheEventMessage cacheEventMessage) {
    if (!replicationThread.isAlive()) {
        LOG.error("CacheEventMessages cannot be added to the replication queue because the replication thread has died.");
    } else {
        synchronized (replicationQueue) {
            replicationQueue.add(cacheEventMessage);
        }
    }
}

private final class ReplicationThread extends Thread {
    public ReplicationThread() {
        super("Replication Thread");
        setDaemon(true);
        setPriority(Thread.NORM_PRIORITY);
    }

    public final void run() {
        replicationThreadMain();
    }
}

private void replicationThreadMain() {
    while (true) {
        // Wait for elements in the replicationQueue
        while (alive() && replicationQueue != null && replicationQueue.size() == 0) {
            try {
                Thread.sleep(asynchronousReplicationInterval);
            } catch (InterruptedException e) {
                LOG.debug("Spool Thread interrupted.");
                return;
            }
        }
        if (notAlive()) {
            return;
        }
        try {
            if (replicationQueue.size() != 0) {
                flushReplicationQueue();
            }
        } catch (Throwable e) {
            LOG.error("Exception on flushing of replication queue: " + e.getMessage() + ". Continuing...", e);
        }
    }
}

private void flushReplicationQueue() {
    List replicationQueueCopy;
    synchronized (replicationQueue) {
        if (replicationQueue.size() == 0) {
            return;
        }
        replicationQueueCopy = new ArrayList(replicationQueue);
        replicationQueue.clear();
    }

    Ehcache cache = ((CacheEventMessage) replicationQueueCopy.get(0)).cache;
    List cachePeers = listRemoteCachePeers(cache);
    List resolvedEventMessages = extractAndResolveEventMessages(replicationQueueCopy);

    for (int j = 0; j < cachePeers.size(); j++) {
        CachePeer cachePeer = (CachePeer) cachePeers.get(j);
        try {
            cachePeer.send(resolvedEventMessages);
        } catch (UnmarshalException e) {
            String message = e.getMessage();
            if (message.indexOf("Read time out") != 0) {
                LOG.warn("Unable to send message to remote peer due to socket read timeout. Consider increasing"
                        + " the socketTimeoutMillis setting in the cacheManagerPeerListenerFactory. "
                        + "Message was: " + e.getMessage());
            } else {
                LOG.debug("Unable to send message to remote peer. Message was: " + e.getMessage());
            }
        } catch (Throwable t) {
            LOG.warn("Unable to send message to remote peer. Message was: " + t.getMessage(), t);
        }
    }
    if (LOG.isWarnEnabled()) {
        int eventMessagesNotResolved = replicationQueueCopy.size() - resolvedEventMessages.size();
        if (eventMessagesNotResolved > 0) {
            LOG.warn(eventMessagesNotResolved + " messages were discarded on replicate due to reclamation of "
                    + "SoftReferences by the VM. Consider increasing the maximum heap size and/or setting the "
                    + "starting heap size to a higher value.");
        }
    }
}