Sn0rt commented on code in PR #665:
URL: https://github.com/apache/apisix-helm-chart/pull/665#discussion_r1396667512
##########
docs/en/latest/FAQ.md:
##########
@@ -39,260 +39,7 @@ helm install apisix apisix/apisix \
### How to install Apache APISIX running in standalone mode?
-Helm chart does not provide a direct way to deploy Apache APISIX running in standalone mode. You can install it in the following manner.
-
-Create a `deploy.yaml` with the following content.
-
-<Tabs
- groupId="version"
- defaultValue="3.0.0-beta"
- values={[
- {label: '3.0.0-beta', value: '3.0.0-beta'},
- {label: '2.15', value: '2.15'},
- ]}>
-
-<TabItem value="3.0.0-beta">
-
-```yaml
-# deploy.yaml
-kind: ConfigMap
-apiVersion: v1
-metadata:
- name: apisix-gw-config.yaml
-data:
- config.yaml: |
- deployment:
- role: data_plane
- role_data_plane:
- config_provider: yaml
----
-kind: ConfigMap
-apiVersion: v1
-metadata:
- name: apisix.yaml
-data:
- apisix.yaml: |
- routes:
- -
- uri: /hi
- upstream:
- nodes:
- "127.0.0.1:1980": 1
- type: roundrobin
- #END
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: apisix-deployment
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: apisix-deployment
- strategy:
- rollingUpdate:
- maxSurge: 50%
- maxUnavailable: 1
- type: RollingUpdate
- template:
- metadata:
- labels:
- app: apisix-deployment
- spec:
- terminationGracePeriodSeconds: 0
- containers:
- - livenessProbe:
- failureThreshold: 3
- initialDelaySeconds: 1
- periodSeconds: 2
- successThreshold: 1
- tcpSocket:
- port: 9080
- timeoutSeconds: 2
- readinessProbe:
- failureThreshold: 3
- initialDelaySeconds: 1
- periodSeconds: 2
- successThreshold: 1
- tcpSocket:
- port: 9080
- timeoutSeconds: 2
- image: "apache/apisix:2.99.0-centos"
- imagePullPolicy: IfNotPresent
- name: apisix-deployment
-        # Create a soft link to link the apisix.yaml file in the mount directory to /usr/local/apisix/conf/apisix.yaml.
-        command: ["sh", "-c","ln -s /apisix-config/apisix.yaml /usr/local/apisix/conf/apisix.yaml && /docker-entrypoint.sh docker-start"]
- ports:
- - containerPort: 9080
- name: "http"
- protocol: "TCP"
- - containerPort: 9443
- name: "https"
- protocol: "TCP"
-
- volumeMounts:
- - mountPath: /usr/local/apisix/conf/config.yaml
- name: apisix-config-yaml-configmap
- subPath: config.yaml
- # configMap directory mounts
- - mountPath: /apisix-config
- name: apisix-admin
- volumes:
- - configMap:
- name: apisix-gw-config.yaml
- name: apisix-config-yaml-configmap
- - configMap:
- name: apisix.yaml
- name: apisix-admin
----
-apiVersion: v1
-kind: Service
-metadata:
- name: apisix-service
-spec:
- selector:
- app: apisix-deployment
- ports:
- - name: http
- port: 9080
- protocol: TCP
- targetPort: 9080
- - name: https
- port: 9443
- protocol: TCP
- targetPort: 9443
- type: NodePort
-```
-
-</TabItem>
-
-<TabItem value="2.15">
-
-```yaml
-# deploy.yaml
-kind: ConfigMap
-apiVersion: v1
-metadata:
- name: apisix-gw-config.yaml
-data:
- config.yaml: |
- apisix:
- enable_admin: false
- config_center: yaml
----
-kind: ConfigMap
-apiVersion: v1
-metadata:
- name: apisix.yaml
-data:
- apisix.yaml: |
- routes:
- -
- uri: /hi
- upstream:
- nodes:
- "127.0.0.1:1980": 1
- type: roundrobin
- #END
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: apisix-deployment
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: apisix-deployment
- strategy:
- rollingUpdate:
- maxSurge: 50%
- maxUnavailable: 1
- type: RollingUpdate
- template:
- metadata:
- labels:
- app: apisix-deployment
- spec:
- terminationGracePeriodSeconds: 0
- containers:
- - livenessProbe:
- failureThreshold: 3
- initialDelaySeconds: 1
- periodSeconds: 2
- successThreshold: 1
- tcpSocket:
- port: 9080
- timeoutSeconds: 2
- readinessProbe:
- failureThreshold: 3
- initialDelaySeconds: 1
- periodSeconds: 2
- successThreshold: 1
- tcpSocket:
- port: 9080
- timeoutSeconds: 2
- image: "apache/apisix:2.15.0-alpine"
- imagePullPolicy: IfNotPresent
- name: apisix-deployment
-        # Create a soft link to link the apisix.yaml file in the mount directory to /usr/local/apisix/conf/apisix.yaml.
-        command: ["sh", "-c", "ln -s /apisix-config/apisix.yaml /usr/local/apisix/conf/apisix.yaml && /usr/bin/apisix init && /usr/bin/apisix init_etcd && /usr/local/openresty/bin/openresty -p /usr/local/apisix -g 'daemon off;'"]
- ports:
- - containerPort: 9080
- name: "http"
- protocol: "TCP"
- - containerPort: 9443
- name: "https"
- protocol: "TCP"
- volumeMounts:
- - mountPath: /usr/local/apisix/conf/config.yaml
- name: apisix-config-yaml-configmap
- subPath: config.yaml
- # configMap directory mounts
- - mountPath: /apisix-config
- name: apisix-admin
- volumes:
- - configMap:
- name: apisix-gw-config.yaml
- name: apisix-config-yaml-configmap
- - configMap:
- name: apisix.yaml
- name: apisix-admin
----
-apiVersion: v1
-kind: Service
-metadata:
- name: apisix-service
-spec:
- selector:
- app: apisix-deployment
- ports:
- - name: http
- port: 9080
- protocol: TCP
- targetPort: 9080
- - name: https
- port: 9443
- protocol: TCP
- targetPort: 9443
- type: NodePort
-```
-
-</TabItem>
-
-</Tabs>
-
-Apply the configuration in `deploy.yaml` to pod.
-
-```shell
-kubectl apply -f deploy.yaml
-```
-
-:::note
-1. The mount of the `apisix.yaml` file requires the injection of the softlink command, so do not change the configMap mount directory to `/usr/local/apisix/conf`, to avoid other configuration files being overwritten.
-2. The `apisix.yaml` is mounted as a configMap, so there will be a delay in reloading the rules after `apisix.yaml` is changed; please refer to this [document](https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically) for details.
-:::
+helm install apisix apisix/apisix --set deployment.mode=standalone --set etcd.enabled=false --set deployment.role=data_plane
Review Comment:
If the user sets `etcd.enabled=true`, what will happen?
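
For context, the `--set` flags in the replacement one-liner map onto a values file as sketched below. This is only an illustrative equivalent of the three flags visible in the diff (`deployment.mode`, `deployment.role`, `etcd.enabled`); the file name `standalone-values.yaml` is hypothetical, setting `etcd.enabled=false` presumably skips the chart's etcd dependency, and the chart may expose other standalone-related settings not shown here.

```yaml
# standalone-values.yaml (hypothetical file name; mirrors the --set flags in the diff)
deployment:
  mode: standalone   # --set deployment.mode=standalone
  role: data_plane   # --set deployment.role=data_plane
etcd:
  enabled: false     # --set etcd.enabled=false
```

```shell
helm install apisix apisix/apisix -f standalone-values.yaml
```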