[ 
https://issues.apache.org/jira/browse/TS-4723?focusedWorklogId=26279&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-26279
 ]

ASF GitHub Bot logged work on TS-4723:
--------------------------------------

                Author: ASF GitHub Bot
            Created on: 10/Aug/16 15:02
            Start Date: 10/Aug/16 15:02
    Worklog Time Spent: 10m 
      Work Description: Github user SolidWallOfCode commented on a diff in the 
pull request:

    https://github.com/apache/trafficserver/pull/843#discussion_r74262139
  
    --- Diff: plugins/experimental/carp/CarpConfigPool.cc ---
    @@ -0,0 +1,218 @@
    +/** @file
    +
    +  Manage a list of CARP configurations
    +
    +  @section license License
    +
    +  Licensed to the Apache Software Foundation (ASF) under one
    +  or more contributor license agreements.  See the NOTICE file
    +  distributed with this work for additional information
    +  regarding copyright ownership.  The ASF licenses this file
    +  to you under the Apache License, Version 2.0 (the
    +  "License"); you may not use this file except in compliance
    +  with the License.  You may obtain a copy of the License at
    +
    +      http://www.apache.org/licenses/LICENSE-2.0
    +
    +  Unless required by applicable law or agreed to in writing, software
    +  distributed under the License is distributed on an "AS IS" BASIS,
    +  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +  See the License for the specific language governing permissions and
    +  limitations under the License.
    + */
    +#include <sstream>
    +
    +#include "Common.h"
    +#include "CarpConfigPool.h"
    +#include "UrlComponents.h"
    +#include "CarpHost.h"
    +
    +#include <netdb.h>
    +#include <ts/ts.h>
    + 
    +using namespace std;
    +
    +/*******************************************************************/
    +CarpConfigPool::CarpConfigPool()
    +{
    +  _globalHash = NULL;
    +  _globalConfig = NULL;
    +}
    +
    +/*******************************************************************/
    +CarpConfigPool::~CarpConfigPool()
    +{
    +  for(CarpConfigListIter it=_configList.begin(); it != _configList.end(); it++) {
    +    it->second->_config->stop();
    +    delete it->second;
    +  }
    +}
    +/*******************************************************************/
    +static int initCarpConfigAndHash(CarpConfigAndHash * cch, string sFilename) {
    +  cch->_configPath = sFilename;
    +  cch->_config = new CarpConfig();
    +  TSAssert(cch->_config);
    +
    +  if (!cch->_config->loadConfig(sFilename)) {
    +    return -1;
    +  }
    +
    +  cch->_hashAlgo = new CarpHashAlgorithm(cch->_config);
    +
    +  TSAssert(cch->_hashAlgo);
    +
    +  CarpHostList* hostList = cch->_config->getHostList();
    +  // add hosts, etc to hash algo
    +  char szServerName[256];
    +  *szServerName = 0;
    +
    +  if (!gethostname(szServerName, 255)) { // success!
    +    TSDebug(DEBUG_TAG_INIT, "using %s as server name to detect 'self'",
    +        szServerName);
    +  }
    +  char buf[1024];
    +  struct hostent self, *selfhe = getHostIp(string(szServerName), &self, buf,
    +      sizeof(buf));
    +
    +  for (CarpHostListIter i = hostList->begin(); i != hostList->end(); i++) {
    +    HashNode *hashNode;
    +    bool bSelf = false;
    +    if (NULL != selfhe) { // check for 'self'
    +      // include hostname and port
    +      bSelf = isSelf((*i)->getName(), (*i)->getPort(), selfhe);
    +    }
    +    if (cch->_config->getHealthCheckPort() == -1) {        // did they specify 'PORT'?
    +      (*i)->setHealthCheckPort((*i)->getPort()); // set HC port from server spec'd port
    +    } else {
    +      (*i)->setHealthCheckPort(cch->_config->getHealthCheckPort()); // set HC port
    +    }
    +    string sHCUrl = cch->_config->getHealthCheckUrl();
    +    size_t pos = sHCUrl.find("{port}");
    +    if (pos != string::npos) { // need to replace '{port}' with the server's port
    +      stringstream ss;
    +      ss << (*i)->getPort();
    +      sHCUrl.replace(pos, 6, ss.str()); // 6 = strlen of '{port}'
    +    }
    +    pos = sHCUrl.find("{host}");
    +    if (pos != string::npos) {
    +      sHCUrl.replace(pos, 6, (*i)->getName());
    +    }
    +    (*i)->setHealthCheckUrl(sHCUrl); // set HC Url
    +
    +    // Look up host and create addr struct for healthchecks
    +    char hBuf[1024];
    +    struct hostent host, *hosthe = getHostIp((*i)->getName(), &host, hBuf,
    +        sizeof(hBuf));
    +    if (hosthe) {
    +      // convert hostent to sockaddr_in structure
    +      sockaddr_in hIn;
    +      memcpy(&hIn.sin_addr, hosthe->h_addr_list[0], hosthe->h_length);
    +      hIn.sin_port = htons((*i)->getHealthCheckPort()); // set port
    +      if (hosthe->h_length == 4) { // size match IPv4? assume such
    +        hIn.sin_family = AF_INET;
    +      } else { // assume IPv6
    +        hIn.sin_family = AF_INET6;
    +      }
    +      (*i)->setHealthCheckAddr(
    +          reinterpret_cast<struct sockaddr_storage &>(hIn));
    +      hIn.sin_port = htons((*i)->getPort());
    +      hashNode = new HashNode((*i)->getName(), (*i)->getPort(),
    +          (*i)->getScheme(), (*i)->getWeight(), bSelf,
    +          reinterpret_cast<struct sockaddr_storage &>(hIn),
    +          (*i)->getGroup());
    +      cch->_hashAlgo->addHost(hashNode);
    +    } else {
    +      //Config error or dns error. Should not continue
    +      TSError("carp: error get peer address of host '%s'", 
(*i)->getName().c_str());
    +      return -1;
    +    }
    +
    +    HttpFetch *f = new HttpFetch(sHCUrl, cch->_hashAlgo, hashNode);
    +    cch->_config->addHealthCheckClient(f);
    +  }
    +  string diag;
    +  cch->_config->dump(diag);
    +  TSDebug(DEBUG_TAG_INIT, "Carp Configuration\n%s", diag.c_str());
    +
    +  // tell algo we are done configuring it
    +  cch->_hashAlgo->algoInit();
    +
    +  return 1;
    +}
    +
    +/*******************************************************************/
    +int
    +cleanHandler(TSCont cont, TSEvent event, void *edata) {
    +  CarpConfigAndHash * cch = (CarpConfigAndHash *)TSContDataGet(cont);
    +  delete cch;
    +  TSContDestroy(cont);
    +
    +  return 1;
    +}
    +
    +/*******************************************************************/
    +CarpConfigAndHash*
    +CarpConfigPool::processConfigFile(string sFilename,bool isGlobal)
    +{
    +
    +  CarpConfigAndHash* cch = _configList[sFilename];
    +
    +  if ( NULL == cch ) { // new config file
    +    cch = new CarpConfigAndHash();
    +    _configList[sFilename] = cch;
    +    TSDebug(DEBUG_TAG_INIT, "processing new config file '%s'", sFilename.c_str());
    +    if (initCarpConfigAndHash(cch, sFilename) < 0 ) {
    +           return NULL;
    +    }
    +
    +    if(isGlobal) { // extract global setting(s) from this config file and save locally
    +      _globalHash = cch->_hashAlgo;
    +      _globalConfig = cch->_config;
    +    }
    +    // create and start health watcher thread
    +    cch->_thread = TSThreadCreate(CarpConfigHealthCheckThreadStart, static_cast<void *>(cch));
    +  } else { // config reload
    +
    +     TSDebug(DEBUG_TAG_HEALTH, "Reload the config file '%s'", sFilename.c_str());
    +     CarpConfigAndHash *newCCH = new CarpConfigAndHash();
    +     CarpConfigAndHash *oldCCH = cch;
    +     if (initCarpConfigAndHash(newCCH, sFilename) < 0 ) {
    +        return NULL;
    +     }
    +     /*
    +      * Find the status of current Host in previous HashAlgo, and
    +      * then assign the value to the new HashAlgo.
    +      */
    +     size_t index;
    +     vector<CarpHost*> *list = newCCH->_config->getHostList();
    +     for (unsigned int i = 0; i < list->size(); i++) {
    +        string name = (*list)[i]->getName();
    --- End diff --
    
    Any reason for this to not be `string const&`?
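    
    For illustration, the suggested form would presumably look something like the 
    sketch below (assuming getName() returns a reference to a stored std::string; 
    a const-ref binding remains safe even if it returns by value):
    
        // Sketch of the suggestion: bind to the stored string instead of
        // copying it on each loop iteration.
        const std::string &name = (*list)[i]->getName();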


Issue Time Tracking
-------------------

    Worklog Id:     (was: 26279)
    Time Spent: 2.5h  (was: 2h 20m)

> ATS CARP Plugin
> ---------------
>
>                 Key: TS-4723
>                 URL: https://issues.apache.org/jira/browse/TS-4723
>             Project: Traffic Server
>          Issue Type: New Feature
>          Components: Plugins
>            Reporter: Eric Schwartz
>            Assignee: Eric Schwartz
>             Fix For: 7.0.0
>
>          Time Spent: 2.5h
>  Remaining Estimate: 0h
>
> Open-sourcing this plugin, which we use internally within Yahoo in place of 
> hierarchical caching.
> CARP is a plugin that allows you to group a bunch of ATS hosts into a cluster 
> and share cache space across the entire group. This is done with consistent 
> hashing on the object URL to generate an "owner" node in the cluster. 
> Requests to any other node in the cluster will be forwarded on to the 
> corresponding owner (a rough sketch of this owner selection follows the issue 
> description below). More info in the README.
> One notable difference from the internal version:
> I've ripped out some code we weren't entirely sure we could open source 
> because of a hash function. If it turns out that we can open source this, 
> I'll do so. The CarpHashAlgorithm class is meant to be extensible, so any 
> consistent hash function can replace it. The function included here is pretty 
> straightforward but not what we use in production, so I just wanted to note 
> that caveat.
> One last caveat:
> You'll see some code and documentation in here for object replication. This 
> is something I added recently to CARP that allows you to specify an object be 
> replicated a certain number of times in the cluster. This is useful if you 
> have a network partition or if you're performing some sort of update. When an 
> object's primary owner is unreachable, a node in the cluster can go to the 
> secondary owner if it's available rather than having to fall all the way back 
> to origin. While I've done some initial testing on this with my own cluster 
> of hosts, it has not been tested in production, so use it at your own risk for now. 
> I'll be sure to keep the open source community informed on the progress of 
> our tests with this feature.
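
To illustrate the owner selection and replication fallback described above, here 
is a rough, hypothetical sketch (std::hash stands in for the plugin's configurable 
CarpHashAlgorithm, and all names are invented for illustration):

    #include <algorithm>
    #include <functional>
    #include <string>
    #include <vector>

    // Score every candidate node against the object URL and rank them:
    // the highest-scoring node is the primary owner, the next one the
    // secondary owner used when replication/fallback is enabled.
    std::vector<std::string>
    rankOwners(const std::string &url, std::vector<std::string> nodes)
    {
      std::sort(nodes.begin(), nodes.end(),
                [&url](const std::string &a, const std::string &b) {
                  std::hash<std::string> h;
                  return h(url + "|" + a) > h(url + "|" + b);
                });
      return nodes; // nodes[0] = primary owner, nodes[1] = secondary, ...
    }

A request arriving at any node would be forwarded to the first entry of 
rankOwners(url, cluster); with replication enabled, an unreachable primary lets 
the node try the next entry instead of falling all the way back to origin.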



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)
