[ https://issues.apache.org/jira/browse/TS-4723?focusedWorklogId=26893&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-26893 ]
ASF GitHub Bot logged work on TS-4723:
--------------------------------------

                Author: ASF GitHub Bot
            Created on: 23/Aug/16 12:20
            Start Date: 23/Aug/16 12:20
    Worklog Time Spent: 10m
      Work Description: Github user SolidWallOfCode commented on a diff in the pull request:

    https://github.com/apache/trafficserver/pull/843#discussion_r75853615

    --- Diff: plugins/experimental/carp/carp.cc ---
    @@ -0,0 +1,713 @@
    +/** @file
    +
    +  A brief file description
    +
    +  @section license License
    +
    +  Licensed to the Apache Software Foundation (ASF) under one
    +  or more contributor license agreements. See the NOTICE file
    +  distributed with this work for additional information
    +  regarding copyright ownership. The ASF licenses this file
    +  to you under the Apache License, Version 2.0 (the
    +  "License"); you may not use this file except in compliance
    +  with the License. You may obtain a copy of the License at
    +
    +      http://www.apache.org/licenses/LICENSE-2.0
    +
    +  Unless required by applicable law or agreed to in writing, software
    +  distributed under the License is distributed on an "AS IS" BASIS,
    +  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +  See the License for the specific language governing permissions and
    +  limitations under the License.
    + */
    +
    +#include <errno.h>
    +
    +#include <string>
    +#include <sstream>
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <sys/stat.h>
    +#include <sys/types.h>
    +#include <unistd.h>
    +
    +#include <memory.h>
    +
    +#include <ts/ts.h>
    +
    +#include "Common.h"
    +#include "CarpConfig.h"
    +#include "CarpConfigPool.h"
    +#include "CarpHashAlgorithm.h"
    +#include "UrlComponents.h"
    +
    +using namespace std;
    +
    +CarpConfigPool* g_CarpConfigPool = NULL;
    +int g_carpSelectedHostArgIndex = 0;
    +TSTextLogObject g_logObject = NULL;
    +
    +const char *logFileName = "carp";
    +
    +//////////////////////////////////////////////////////////////////////////////
    +//////////////////////////////////////////////////////////////////////////////
    +/*
    + check for our carp routed header, dump status if requested
    + */
    +static int
    +processCarpRoutedHeader(TSHttpTxn txnp, TSMBuffer bufp, TSMLoc hdr_loc)
    +{
    +  string value;
    +  if (getHeader(bufp, hdr_loc, CARP_ROUTED_HEADER, value)) { // if found header
    +    if (value.compare("1") == 0) { // is loop prevention value
    +      TSDebug(DEBUG_TAG_HOOK, "Found %s header with loop prevention value, not forwarding again", CARP_ROUTED_HEADER.c_str());
    +      return 0;
    +    } else if (value.compare("dump") == 0) { // is dump status request
    +      TSDebug(DEBUG_TAG_HOOK, "Found %s header with dump request", CARP_ROUTED_HEADER.c_str());
    +      string status;
    +      g_CarpConfigPool->getGlobalHashAlgo()->dump(status);
    +      TSHttpTxnSetHttpRetStatus(txnp, TS_HTTP_STATUS_MULTI_STATUS);
    +      TSHttpTxnErrorBodySet(txnp, TSstrdup(status.c_str()), status.length(), NULL);
    +      return -1;
    +    }
    +    TSDebug(DEBUG_TAG_HOOK, "Found %s header with unknown value of %s, ignoring", CARP_ROUTED_HEADER.c_str(), value.c_str());
    +    removeHeader(bufp, hdr_loc, CARP_ROUTED_HEADER);
    +  }
    +  return 1; // all OK
    +}
    +
    +static bool
    +checkListForSelf(std::vector<HashNode *> list)
    +{
    +  for (size_t k = 0; k < list.size(); k++) {
    +    if (list[k]->isSelf) return true;
    +  }
    +  return false;
    +}
    +
    +/**
    + bIsPOSTRemap = false --- Hash request and forward to peer
    + bIsPOSTRemap = true --- hash request, extract OS sockaddr, insert forwarding header, forward
    + */
    +static int
    +handleRequestProcessing(TSCont contp, TSEvent event, void *edata, bool bIsPOSTRemap)
    +{
    +  TSHttpTxn txnp = (TSHttpTxn) edata;
    +  TSMBuffer bufp;
    +  TSMLoc hdr_loc;
    +  TSMLoc url_loc;
    +
    +  // get the client request so we can get URL and add header
    +  if (TSHttpTxnClientReqGet(txnp, &bufp, &hdr_loc) != TS_SUCCESS) {
    +    TSError("carp couldn't get request headers");
    +    return -1;
    +  }
    +
    +  int method_len;
    +  const char *method = TSHttpHdrMethodGet(bufp, hdr_loc, &method_len);
    +  if (NULL == method) {
    +    TSError("carp couldn't get http method");
    +    TSHandleMLocRelease(bufp, TS_NULL_MLOC, hdr_loc);
    +    return -1;
    +  }
    +  if (((method_len == TS_HTTP_LEN_DELETE) && (strncasecmp(method, TS_HTTP_METHOD_DELETE, TS_HTTP_LEN_DELETE) == 0)) ||
    +      ((method_len == TS_HTTP_LEN_PURGE) && (strncasecmp(method, TS_HTTP_METHOD_PURGE, TS_HTTP_LEN_PURGE) == 0))) {
    +    TSDebug(DEBUG_TAG_HOOK, "Request method is '%s' so not routing request", string(method,method_len).c_str());
    +    TSHandleMLocRelease(bufp, TS_NULL_MLOC, hdr_loc);
    +    return 0;
    +  }
    +
    +  if (TSHttpHdrUrlGet(bufp, hdr_loc, &url_loc) != TS_SUCCESS) {
    +    TSError("carp couldn't get url");
    +    TSHandleMLocRelease(bufp, TS_NULL_MLOC, hdr_loc);
    +    return -1;
    +  }
    +  // if has carp loop prevention header, do not remap
    +  int iTemp = processCarpRoutedHeader(txnp, bufp, hdr_loc);
    +  if(iTemp <= 0) { // if dump or do not remap
    +    // check origin client request's scheme for premap mode
    +    if (!bIsPOSTRemap) {
    +      string oriScheme;
    +      if (!getHeader(bufp, hdr_loc, CARP_PREMAP_SCHEME, oriScheme)) {
    +        TSDebug(DEBUG_TAG_HOOK, "couldn't get '%s' header", CARP_PREMAP_SCHEME.c_str());
    +      } else {
    +        bool isHttps = (oriScheme == TS_URL_SCHEME_HTTPS);
    +
    +        if (isHttps) {
    +          TSUrlSchemeSet(bufp, url_loc, TS_URL_SCHEME_HTTPS, TS_URL_LEN_HTTPS);
    +        } else {
    +          TSUrlSchemeSet(bufp, url_loc, TS_URL_SCHEME_HTTP, TS_URL_LEN_HTTP);
    +        }
    +
    +        removeHeader(bufp, hdr_loc, CARP_STATUS_HEADER);
    +        removeHeader(bufp, hdr_loc, CARP_ROUTED_HEADER);
    +        removeHeader(bufp, hdr_loc, CARP_PREMAP_SCHEME.c_str());
    +        TSDebug(DEBUG_TAG_HOOK, "Set client request's scheme to %s through %s header",
    +                isHttps?"https":"http", CARP_PREMAP_SCHEME.c_str());
    +      }
    +    } else {
    +      removeHeader(bufp, hdr_loc, CARP_ROUTED_HEADER);
    +    }
    +    TSHandleMLocRelease(bufp, TS_NULL_MLOC, hdr_loc);
    +    return iTemp;
    +  }
    +
    +  UrlComponents reqUrl;
    +  reqUrl.populate(bufp, url_loc);
    +  // the url ONLY used to determine the cache owner
    +  string sUrl;
    +
    +  if (!bIsPOSTRemap) { // if pre-remap, then host not in URL so get from header
    +    string sHost;
    +    if (!getHeader(bufp, hdr_loc, TS_MIME_FIELD_HOST, sHost)) {
    +      TSDebug(DEBUG_TAG_HOOK, "Could not find host header, ignoring it");
    +    }
    +    reqUrl.setHost(sHost);
    --- End diff --

    Shouldn't this be in `else` if it's being ignored?
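
For illustration only (not part of the pull request): the guarded variant the reviewer appears to have in mind could look roughly like the sketch below, reusing the same helpers that appear in the quoted hunk.

    // Sketch of the reviewer's suggestion: skip setHost() when no Host
    // header is present, instead of setting an empty host on the URL.
    if (!bIsPOSTRemap) { // if pre-remap, then host not in URL so get from header
      string sHost;
      if (!getHeader(bufp, hdr_loc, TS_MIME_FIELD_HOST, sHost)) {
        TSDebug(DEBUG_TAG_HOOK, "Could not find host header, ignoring it");
      } else {
        reqUrl.setHost(sHost); // only touch the URL host when the header was actually found
      }
    }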
Issue Time Tracking
-------------------

    Worklog Id:     (was: 26893)
    Time Spent: 4h 20m  (was: 4h 10m)

> ATS CARP Plugin
> ---------------
>
>                 Key: TS-4723
>                 URL: https://issues.apache.org/jira/browse/TS-4723
>             Project: Traffic Server
>          Issue Type: New Feature
>          Components: Plugins
>            Reporter: Eric Schwartz
>            Assignee: Eric Schwartz
>             Fix For: 7.0.0
>
>          Time Spent: 4h 20m
>  Remaining Estimate: 0h
>
> We are open sourcing this plugin, which we use internally at Yahoo in place of hierarchical caching.
> CARP is a plugin that lets you group a set of ATS hosts into a cluster and share cache space across the entire group. This is done with consistent hashing on the object URL to generate an "owner" node in the cluster. Requests to any other node in the cluster are forwarded on to the corresponding owner. More info in the README.
> A difference from the internal version worth noting:
> I've ripped out some code we weren't entirely sure we could open source because of a hash function. If it turns out that we can open source it, I'll do so. The CarpHashAlgorithm class is meant to be extensible, so any consistent hash function can replace the one included here. The included function is straightforward but not what we use in production, so keep that caveat in mind.
> One last caveat:
> You'll see some code and documentation in here for object replication. This is something I added recently to CARP that allows you to specify that an object be replicated a certain number of times in the cluster. This is useful if you have a network partition or if you're performing some sort of update: when an object's primary owner is unreachable, a node in the cluster can go to the secondary owner if it's available rather than having to fall all the way back to origin. While I've done some initial testing of this with my own cluster of hosts, it has not been tested in production, so use it at your own risk for now. I'll be sure to keep the open source community informed on the progress of our tests with this feature.

--
This message was sent by Atlassian JIRA
(v6.3.4#6332)
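
For illustration only, and not taken from the plugin: the issue description above selects an "owner" node per URL by consistent hashing and, with replication enabled, falls back to a secondary owner when the primary is unreachable. The minimal, self-contained sketch below shows that idea using rendezvous (highest-random-weight) hashing; the names Node, score, rankOwners and pickOwner are hypothetical, and std::hash stands in for whatever hash CarpHashAlgorithm actually uses.

    // CARP-style owner selection sketch: every node gets a score for a given
    // URL, the top score is the primary owner, and the next-highest scores
    // serve as replica owners to fall back on when the primary is down.
    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <string>
    #include <vector>

    struct Node {
      std::string host;
      bool up; // reachability flag; hard-coded here, tracked dynamically in a real cluster
    };

    // Mix the URL hash with a per-node hash; std::hash is only a stand-in.
    static uint64_t
    score(const std::string &url, const std::string &host)
    {
      uint64_t h1 = std::hash<std::string>{}(url);
      uint64_t h2 = std::hash<std::string>{}(host);
      return h1 ^ (h2 + 0x9e3779b97f4a7c15ULL + (h1 << 6) + (h1 >> 2));
    }

    // Rank nodes for a URL, from primary owner to least-preferred replica.
    static std::vector<const Node *>
    rankOwners(const std::string &url, const std::vector<Node> &cluster)
    {
      std::vector<const Node *> ranked;
      for (const Node &n : cluster) {
        ranked.push_back(&n);
      }
      std::sort(ranked.begin(), ranked.end(), [&url](const Node *a, const Node *b) {
        return score(url, a->host) > score(url, b->host);
      });
      return ranked;
    }

    // Pick the first reachable node among the top `replicas` owners,
    // or NULL to signal "fall back to origin".
    static const Node *
    pickOwner(const std::string &url, const std::vector<Node> &cluster, std::size_t replicas)
    {
      std::vector<const Node *> ranked = rankOwners(url, cluster);
      std::size_t limit = std::min(replicas, ranked.size());
      for (std::size_t i = 0; i < limit; i++) {
        if (ranked[i]->up) {
          return ranked[i];
        }
      }
      return NULL;
    }

    int
    main()
    {
      std::vector<Node> cluster = {{"cache1.example.com", true}, {"cache2.example.com", true}, {"cache3.example.com", true}};
      cluster[0].up = false; // pretend one peer is down; a replica may still own the object
      const Node *owner = pickOwner("http://example.com/some/object", cluster, 2);
      if (owner != NULL) {
        std::cout << "forward to " << owner->host << std::endl;
      } else {
        std::cout << "no reachable owner, go to origin" << std::endl;
      }
      return 0;
    }

Rendezvous hashing is one common way to get CARP-style consistent assignment: removing or losing a node only remaps the objects that node owned, which is what makes the secondary-owner fallback described above cheap.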