[
https://issues.apache.org/jira/browse/GEODE-1874?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
]
Hitesh Khamesra closed GEODE-1874.
----------------------------------
> setNextNeighbor method allocates a HashSet on every p2p message received
> ------------------------------------------------------------------------
>
> Key: GEODE-1874
> URL: https://issues.apache.org/jira/browse/GEODE-1874
> Project: Geode
> Issue Type: Bug
> Components: messaging
> Reporter: Darrel Schneider
> Assignee: Udo Kohlmeyer
> Fix For: 1.1.0
>
>
> Every time a peer receives a message it ends up calling setNextNeighbor in
> com.gemstone.gemfire.distributed.internal.membership.gms.fd.GMSHealthMonitor.
> This method always allocates a HashSet and adds all the existing members to it
> just so it can figure out if all other members are suspected.
> I was doing performance analysis of a PR get test and this HashSet resulted
> in a 10% decrease in the get rate.
> It is not clear why setNextNeighbor is even being called by contactedBy since
> the actual view did not change. So one possible fix would be to not call
> setNextNeighbor from contactedBy.
> But here is an optimization of setNextNeighbor that prevents any work from
> being done if the sizes indicate that not all other members are suspects.
> When it does need to check the new code just reads the existing state instead
> of allocating and adding to a new HashSet:
> {noformat}
> diff --git
> a/geode-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitor.java
>
> b/geode-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitor.java
> index 9fdbb64..fcb2826 100644
> ---
> a/geode-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitor.java
> +++
> b/geode-core/src/main/java/com/gemstone/gemfire/distributed/internal/membership/gms/fd/GMSHealthMonitor.java
> @@ -818,13 +818,22 @@ public class GMSHealthMonitor implements HealthMonitor,
> MessageHandler {
>
> List<InternalDistributedMember> allMembers = newView.getMembers();
>
> - Set<InternalDistributedMember> checkAllSuspected = new
> HashSet<>(allMembers);
> - checkAllSuspected.removeAll(suspectedMemberInView.keySet());
> - checkAllSuspected.remove(localAddress);
> - if (checkAllSuspected.isEmpty() && allMembers.size() > 1) {
> - logger.info("All other members are suspect at this point");
> - nextNeighbor = null;
> - return;
> + if (allMembers.size() > 1 && suspectedMemberInView.size() >=
> allMembers.size()-1) {
> + boolean nonSuspectFound = false;
> + for (InternalDistributedMember member: allMembers) {
> + if (member.equals(localAddress)) {
> + continue;
> + }
> + if (!suspectedMemberInView.containsKey(member)) {
> + nonSuspectFound = true;
> + break;
> + }
> + }
> + if (!nonSuspectFound) {
> + logger.info("All other members are suspect at this point");
> + nextNeighbor = null;
> + return;
> + }
> }
>
> int index = allMembers.indexOf(nextTo);
> {noformat}
--
This message was sent by Atlassian JIRA
(v6.3.15#6346)