This is an automated email from the ASF dual-hosted git repository.

mgrigorov pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/avro.git


The following commit(s) were added to refs/heads/main by this push:
     new f694d2453 Add website docs for 1.11.4
f694d2453 is described below

commit f694d24532db956fccae0435a8e2f725989841a6
Author: Martin Tzvetanov Grigorov <[email protected]>
AuthorDate: Fri Sep 27 09:55:38 2024 +0300

    Add website docs for 1.11.4
    
    Signed-off-by: Martin Tzvetanov Grigorov <[email protected]>
---
 doc/config.toml                                    |   4 +
 doc/content/en/avro.rdf                            |   6 +-
 .../en/blog/releases/avro-1.11.4-released.md       |  49 ++
 .../docs/1.11.4/Getting started (Java)/_index.md   | 289 +++++++
 .../docs/1.11.4/Getting started (Python)/_index.md | 147 ++++
 doc/content/en/docs/1.11.4/IDL Language/_index.md  | 435 +++++++++++
 .../en/docs/1.11.4/MapReduce guide/_index.md       | 396 ++++++++++
 doc/content/en/docs/1.11.4/SASL profile/_index.md  |  93 +++
 doc/content/en/docs/1.11.4/Specification/_index.md | 848 +++++++++++++++++++++
 doc/content/en/docs/1.11.4/_index.md               |  59 ++
 doc/content/en/docs/1.11.4/api-c++.md              |  29 +
 doc/content/en/docs/1.11.4/api-c.md                |  29 +
 doc/content/en/docs/1.11.4/api-csharp.md           |  29 +
 doc/content/en/docs/1.11.4/api-java.md             |  29 +
 doc/content/en/docs/1.11.4/api-py.md               |  29 +
 doc/content/en/docs/1.11.4/logo.svg                |  22 +
 doc/examples/java-example/pom.xml                  |   2 +-
 doc/examples/mr-example/pom.xml                    |   2 +-
 18 files changed, 2492 insertions(+), 5 deletions(-)

diff --git a/doc/config.toml b/doc/config.toml
index 8f5c40db0..f53f8e412 100644
--- a/doc/config.toml
+++ b/doc/config.toml
@@ -259,6 +259,10 @@ url = "http://www.apache.org/security/";
 version = "1.12.0"
 url = "https://avro.apache.org/docs/1.12.0/";
 
+[[params.versions]]
+version = "1.11.4"
+url = "https://avro.apache.org/docs/1.11.4/";
+
 [[params.versions]]
 version = "1.11.3"
 url = "https://avro.apache.org/docs/1.11.3/";
diff --git a/doc/content/en/avro.rdf b/doc/content/en/avro.rdf
index 0c3b6adcd..ba44ba098 100644
--- a/doc/content/en/avro.rdf
+++ b/doc/content/en/avro.rdf
@@ -52,9 +52,9 @@
     -->
     <release>
       <Version>
-        <name>Avro 1.11.3</name>
-        <created>2023-09-17</created>
-        <revision>1.11.3</revision>
+        <name>Avro 1.12.0</name>
+        <created>2024-08-05</created>
+        <revision>1.12.0</revision>
       </Version>
     </release>
     <repository>
diff --git a/doc/content/en/blog/releases/avro-1.11.4-released.md 
b/doc/content/en/blog/releases/avro-1.11.4-released.md
new file mode 100755
index 000000000..c5f36146d
--- /dev/null
+++ b/doc/content/en/blog/releases/avro-1.11.4-released.md
@@ -0,0 +1,49 @@
+---
+title: "Avro 1.11.4"
+linkTitle: "Avro 1.11.4"
+date: 2024-09-22
+---
+
+<!--
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+   https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+
+-->
+
+The Apache Avro community is pleased to announce the release of Avro 1.11.4!
+
+All signed release artifacts, signatures and verification instructions can
be found <a href="{{< relref "/project/download" >}}">here</a>.
+
+This release [addresses 4 Jira issues](https://issues.apache.org/jira/issues/?jql=project%3DAVRO%20AND%20fixVersion%3D1.11.4) in the Java SDK only. All other SDKs are unchanged from the 1.12.0 release, so please use 1.12.0 for them!
+
+## Highlights
+
+Java
+- [AVRO-3985](https://issues.apache.org/jira/browse/AVRO-3985): Restrict 
trusted packages in ReflectData and SpecificData
+- [AVRO-3989](https://issues.apache.org/jira/browse/AVRO-3989): Maven Plugin 
Always Recompiles IDL Files
+- [AVRO-3880](https://issues.apache.org/jira/browse/AVRO-3880): Upgrade 
maven-antrun-plugin to 3.1.0
+- [AVRO-3748](https://issues.apache.org/jira/browse/AVRO-3748): issue with 
DataFileSeekableInput.SeekableInputStream.skip
+
+
+## Language SDK / Convenience artifacts
+
+* Java: https://repo1.maven.org/maven2/org/apache/avro/avro/1.11.4/
+
+Thanks to everyone for contributing!
diff --git a/doc/content/en/docs/1.11.4/Getting started (Java)/_index.md 
b/doc/content/en/docs/1.11.4/Getting started (Java)/_index.md
new file mode 100644
index 000000000..a470a0291
--- /dev/null
+++ b/doc/content/en/docs/1.11.4/Getting started (Java)/_index.md       
@@ -0,0 +1,289 @@
+---
+categories: []
+tags: ["java"] 
+title: "Getting Started (Java)"
+linkTitle: "Getting Started (Java)"
+weight: 2
+---
+
+<!--
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+   https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+
+-->
+
+This is a short guide for getting started with Apache Avro™ using Java. This 
guide only covers using Avro for data serialization; see Patrick Hunt's [Avro 
RPC Quick Start](https://github.com/phunt/avro-rpc-quickstart) for a good 
introduction to using Avro for RPC.
+
+## Download
+
+Avro implementations for C, C++, C#, Java, PHP, Python, and Ruby can be 
downloaded from the [Apache Avro™ Download]({{< relref "/project/download" >}}) 
page. This guide uses Avro 1.11.4, the latest version at the time of writing. 
For the examples in this guide, download avro-1.11.4.jar and 
avro-tools-1.11.4.jar.
+
+Alternatively, if you are using Maven, add the following dependency to your 
POM:
+
+```xml
+<dependency>
+  <groupId>org.apache.avro</groupId>
+  <artifactId>avro</artifactId>
+  <version>1.11.4</version>
+</dependency>
+```
+
+As well as the Avro Maven plugin (for performing code generation):
+
+```xml
+<plugin>
+  <groupId>org.apache.avro</groupId>
+  <artifactId>avro-maven-plugin</artifactId>
+  <version>1.11.4</version>
+  <executions>
+    <execution>
+      <phase>generate-sources</phase>
+      <goals>
+        <goal>schema</goal>
+      </goals>
+      <configuration>
+        <sourceDirectory>${project.basedir}/src/main/avro/</sourceDirectory>
+        <outputDirectory>${project.basedir}/src/main/java/</outputDirectory>
+      </configuration>
+    </execution>
+  </executions>
+</plugin>
+<plugin>
+  <groupId>org.apache.maven.plugins</groupId>
+  <artifactId>maven-compiler-plugin</artifactId>
+  <configuration>
+    <source>1.8</source>
+    <target>1.8</target>
+  </configuration>
+</plugin>
+```
+      
+You may also build the required Avro jars from source. Building Avro is beyond 
the scope of this guide; see the Build Documentation page in the wiki for more 
information.
+
+## Defining a schema
+
+Avro schemas are defined using JSON. Schemas are composed of primitive types 
(null, boolean, int, long, float, double, bytes, and string) and complex types 
(record, enum, array, map, union, and fixed). You can learn more about Avro 
schemas and types from the specification, but for now let's start with a simple 
schema example, user.avsc:
+
+```json
+{"namespace": "example.avro",
+ "type": "record",
+ "name": "User",
+ "fields": [
+     {"name": "name", "type": "string"},
+     {"name": "favorite_number",  "type": ["int", "null"]},
+     {"name": "favorite_color", "type": ["string", "null"]}
+ ]
+}
+```
+      
+This schema defines a record representing a hypothetical user. (Note that a 
schema file can only contain a single schema definition.) At minimum, a record 
definition must include its type ("type": "record"), a name ("name": "User"), 
and fields, in this case name, favorite_number, and favorite_color. We also 
define a namespace ("namespace": "example.avro"), which together with the name 
attribute defines the "full name" of the schema (example.avro.User in this 
case).
+
+Fields are defined via an array of objects, each of which defines a name and 
type (other attributes are optional, see the record specification for more 
details). The type attribute of a field is another schema object, which can be 
either a primitive or complex type. For example, the name field of our User 
schema is the primitive type string, whereas the favorite_number and 
favorite_color fields are both unions, represented by JSON arrays. unions are a 
complex type that can be any of the  [...]
+
+## Serializing and deserializing with code generation
+
+### Compiling the schema
+Code generation allows us to automatically create classes based on our 
previously-defined schema. Once we have defined the relevant classes, there is 
no need to use the schema directly in our programs. We use the avro-tools jar 
to generate code as follows:
+
+```shell
+java -jar /path/to/avro-tools-1.11.4.jar compile schema <schema file> 
<destination>
+```
+
+This will generate the appropriate source files in a package based on the 
schema's namespace in the provided destination folder. For instance, to 
generate a User class in package example.avro from the schema defined above, run
+
+```shell
+java -jar /path/to/avro-tools-1.11.4.jar compile schema user.avsc .
+```
+
+Note that if you are using the Avro Maven plugin, there is no need to manually invoke the schema compiler; the plugin automatically performs code generation on any .avsc files present in the configured source directory.
+
+### Creating Users
+Now that we've completed the code generation, let's create some Users, 
serialize them to a data file on disk, and then read back the file and 
deserialize the User objects.
+
+First let's create some Users and set their fields.
+
+```java
+User user1 = new User();
+user1.setName("Alyssa");
+user1.setFavoriteNumber(256);
+// Leave favorite color null
+
+// Alternate constructor
+User user2 = new User("Ben", 7, "red");
+
+// Construct via builder
+User user3 = User.newBuilder()
+             .setName("Charlie")
+             .setFavoriteColor("blue")
+             .setFavoriteNumber(null)
+             .build();
+```
+
+As shown in this example, Avro objects can be created either by invoking a constructor directly or by using a builder. Unlike constructors, builders will automatically set any default values specified in the schema. Additionally, builders validate the data as it is set, whereas objects constructed directly will not cause an error until the object is serialized. However, using constructors directly generally offers better performance, as builders create a copy of the data structure before it  [...]
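+
+For illustration, here is a hedged sketch of that difference, using the generated User class from the schema above. Since the name field has no default in the schema, one would expect the builder to reject a record that omits it, while the constructor accepts anything and only fails later at serialization time:
+
+```java
+// Builder: validates as the record is built and applies schema defaults where they exist.
+try {
+  User incomplete = User.newBuilder()
+      .setFavoriteNumber(1)
+      .build();            // expected to throw, since "name" has no default value
+} catch (org.apache.avro.AvroRuntimeException e) {
+  System.out.println("Builder rejected the record: " + e.getMessage());
+}
+
+// Constructor: no validation and no defaults; a missing name only surfaces when serializing.
+User viaConstructor = new User(null, 7, "red");
+```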
+
+Note that we do not set user1's favorite color. Since that field is of type ["string", "null"], we can either set it to a string or leave it null; it is essentially optional. Similarly, we set user3's favorite number to null (using a builder requires setting all fields, even if they are null).
+
+### Serializing
+Now let's serialize our Users to disk.
+
+```java
+// Serialize user1, user2 and user3 to disk
+DatumWriter<User> userDatumWriter = new SpecificDatumWriter<User>(User.class);
+DataFileWriter<User> dataFileWriter = new 
DataFileWriter<User>(userDatumWriter);
+dataFileWriter.create(user1.getSchema(), new File("users.avro"));
+dataFileWriter.append(user1);
+dataFileWriter.append(user2);
+dataFileWriter.append(user3);
+dataFileWriter.close();
+```
+
+We create a DatumWriter, which converts Java objects into an in-memory 
serialized format. The SpecificDatumWriter class is used with generated classes 
and extracts the schema from the specified generated type.
+
+Next we create a DataFileWriter, which writes the serialized records, as well 
as the schema, to the file specified in the dataFileWriter.create call. We 
write our users to the file via calls to the dataFileWriter.append method. When 
we are done writing, we close the data file.
+
+### Deserializing
+Finally, let's deserialize the data file we just created.
+
+```java
+// Deserialize Users from disk
+DatumReader<User> userDatumReader = new SpecificDatumReader<User>(User.class);
+DataFileReader<User> dataFileReader = new DataFileReader<User>(file, 
userDatumReader);
+User user = null;
+while (dataFileReader.hasNext()) {
+  // Reuse user object by passing it to next(). This saves us from
+  // allocating and garbage collecting many objects for files with
+  // many items.
+  user = dataFileReader.next(user);
+  System.out.println(user);
+}
+```
+        
+This snippet will output:
+
+```json
+{"name": "Alyssa", "favorite_number": 256, "favorite_color": null}
+{"name": "Ben", "favorite_number": 7, "favorite_color": "red"}
+{"name": "Charlie", "favorite_number": null, "favorite_color": "blue"}
+```
+
+Deserializing is very similar to serializing. We create a SpecificDatumReader, 
analogous to the SpecificDatumWriter we used in serialization, which converts 
in-memory serialized items into instances of our generated class, in this case 
User. We pass the DatumReader and the previously created File to a 
DataFileReader, analogous to the DataFileWriter, which reads both the schema 
used by the writer as well as the data from the file on disk. The data will be 
read using the writer's schema in [...]
+
+Next we use the DataFileReader to iterate through the serialized Users and print the deserialized object to stdout. Note how we perform the iteration: we create a single User object in which we store the current deserialized user, and pass this record object to every call of dataFileReader.next. This is a performance optimization that allows the DataFileReader to reuse the same User object rather than allocating a new User for every iteration, which can be very expensive in terms of obje [...]
+
+### Compiling and running the example code
+This example code is included as a Maven project in the examples/java-example 
directory in the Avro docs. From this directory, execute the following commands 
to build and run the example:
+
+```shell
+$ mvn compile # includes code generation via Avro Maven plugin
+$ mvn -q exec:java -Dexec.mainClass=example.SpecificMain
+```
+
+### Beta feature: Generating faster code
+In release 1.9.0, we introduced a new approach to generating code that speeds 
up decoding of objects by more than 10% and encoding by more than 30% (future 
performance enhancements are underway). To ensure a smooth introduction of this 
change into production systems, this feature is controlled by a feature flag, 
the system property org.apache.avro.specific.use_custom_coders. In this first 
release, this feature is off by default. To turn it on, set the system flag to 
true at runtime. In t [...]
+
+$ mvn -q exec:java -Dexec.mainClass=example.SpecificMain \
+    -Dorg.apache.avro.specific.use_custom_coders=true
+        
+Note that you do not have to recompile your Avro schema to have access to this 
feature. The feature is compiled and built into your code, and you turn it on 
and off at runtime using the feature flag. As a result, you can turn it on 
during testing, for example, and then off in production. Or you can turn it on 
in production, and quickly turn it off if something breaks.
+
+We encourage the Avro community to exercise this new feature early to help build confidence. (For those paying on-demand for compute resources in the cloud, it can lead to meaningful cost savings.) As confidence builds, we will turn this feature on by default, and eventually eliminate the feature flag (and the old code).
+
+## Serializing and deserializing without code generation
+Data in Avro is always stored with its corresponding schema, meaning we can 
always read a serialized item regardless of whether we know the schema ahead of 
time. This allows us to perform serialization and deserialization without code 
generation.
+
+Let's go over the same example as in the previous section, but without using code generation: we'll create some users, serialize them to a data file on disk, and then read back the file and deserialize the user objects.
+
+### Creating users
+First, we use a Parser to read our schema definition and create a Schema 
object.
+
+```java
+Schema schema = new Schema.Parser().parse(new File("user.avsc"));
+```
+
+Using this schema, let's create some users.
+
+```java
+GenericRecord user1 = new GenericData.Record(schema);
+user1.put("name", "Alyssa");
+user1.put("favorite_number", 256);
+// Leave favorite color null
+
+GenericRecord user2 = new GenericData.Record(schema);
+user2.put("name", "Ben");
+user2.put("favorite_number", 7);
+user2.put("favorite_color", "red");
+```
+
+Since we're not using code generation, we use GenericRecords to represent 
users. GenericRecord uses the schema to verify that we only specify valid 
fields. If we try to set a non-existent field (e.g., 
user1.put("favorite_animal", "cat")), we'll get an AvroRuntimeException when we 
run the program.
+
+Note that we do not set user1's favorite color. Since that field is of type ["string", "null"], we can either set it to a string or leave it null; it is essentially optional.
+
+### Serializing
+Now that we've created our user objects, serializing and deserializing them is 
almost identical to the example above which uses code generation. The main 
difference is that we use generic instead of specific readers and writers.
+
+First we'll serialize our users to a data file on disk.
+
+```java
+// Serialize user1 and user2 to disk
+File file = new File("users.avro");
+DatumWriter<GenericRecord> datumWriter = new 
GenericDatumWriter<GenericRecord>(schema);
+DataFileWriter<GenericRecord> dataFileWriter = new 
DataFileWriter<GenericRecord>(datumWriter);
+dataFileWriter.create(schema, file);
+dataFileWriter.append(user1);
+dataFileWriter.append(user2);
+dataFileWriter.close();
+```
+
+We create a DatumWriter, which converts Java objects into an in-memory 
serialized format. Since we are not using code generation, we create a 
GenericDatumWriter. It requires the schema both to determine how to write the 
GenericRecords and to verify that all non-nullable fields are present.
+
+As in the code generation example, we also create a DataFileWriter, which 
writes the serialized records, as well as the schema, to the file specified in 
the dataFileWriter.create call. We write our users to the file via calls to the 
dataFileWriter.append method. When we are done writing, we close the data file.
+
+### Deserializing
+Finally, we'll deserialize the data file we just created.
+
+```java
+// Deserialize users from disk
+DatumReader<GenericRecord> datumReader = new 
GenericDatumReader<GenericRecord>(schema);
+DataFileReader<GenericRecord> dataFileReader = new 
DataFileReader<GenericRecord>(file, datumReader);
+GenericRecord user = null;
+while (dataFileReader.hasNext()) {
+  // Reuse user object by passing it to next(). This saves us from
+  // allocating and garbage collecting many objects for files with
+  // many items.
+  user = dataFileReader.next(user);
+  System.out.println(user);
+}
+```
+
+This outputs:
+
+```json
+{"name": "Alyssa", "favorite_number": 256, "favorite_color": null}
+{"name": "Ben", "favorite_number": 7, "favorite_color": "red"}
+```
+        
+Deserializing is very similar to serializing. We create a GenericDatumReader, 
analogous to the GenericDatumWriter we used in serialization, which converts 
in-memory serialized items into GenericRecords. We pass the DatumReader and the 
previously created File to a DataFileReader, analogous to the DataFileWriter, 
which reads both the schema used by the writer as well as the data from the 
file on disk. The data will be read using the writer's schema included in the 
file, and the reader's sc [...]
+
+Next, we use the DataFileReader to iterate through the serialized users and print the deserialized object to stdout. Note how we perform the iteration: we create a single GenericRecord object in which we store the current deserialized user, and pass this record object to every call of dataFileReader.next. This is a performance optimization that allows the DataFileReader to reuse the same record object rather than allocating a new GenericRecord for every iteration, which can be very expen [...]
+
+### Compiling and running the example code
+This example code is included as a Maven project in the examples/java-example 
directory in the Avro docs. From this directory, execute the following commands 
to build and run the example:
+
+```shell
+$ mvn compile
+$ mvn -q exec:java -Dexec.mainClass=example.GenericMain
+```
diff --git a/doc/content/en/docs/1.11.4/Getting started (Python)/_index.md 
b/doc/content/en/docs/1.11.4/Getting started (Python)/_index.md
new file mode 100644
index 000000000..d4e40a614
--- /dev/null
+++ b/doc/content/en/docs/1.11.4/Getting started (Python)/_index.md     
@@ -0,0 +1,147 @@
+---
+categories: []
+tags: ["python"] 
+title: "Getting Started (Python)"
+linkTitle: "Getting Started (Python)"
+weight: 3
+---
+
+<!--
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+   https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+
+-->
+
+This is a short guide for getting started with Apache Avro™ using Python. This 
guide only covers using Avro for data serialization; see Patrick Hunt's Avro 
RPC Quick Start for a good introduction to using Avro for RPC.
+
+## Notice for Python 3 users
+A package called "avro-python3" was previously provided to support Python 3, but the codebase has since been consolidated into the "avro" package, which now supports both Python 2 and 3. The avro-python3 package will be removed in the near future, so users should use the "avro" package instead. They are mostly API compatible, but there are a few minor differences (e.g., function name capitalization, such as avro.schema.Parse vs avro.schema.parse).
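+
+As a small illustration of that naming difference (a sketch; user.avsc is the schema file used later in this guide):
+
+```python
+import avro.schema
+
+# "avro" package (used throughout this guide): lowercase function names
+schema = avro.schema.parse(open("user.avsc", "rb").read())
+
+# The deprecated "avro-python3" package used capitalized names instead,
+# e.g. avro.schema.Parse(...)
+```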
+
+## Download
+For Python, the easiest way to get started is to install Avro from PyPI, where the Python Avro API is published.
+
+```shell
+$ python3 -m pip install avro
+```
+
+The official releases of the Avro implementations for C, C++, C#, Java, PHP, 
Python, and Ruby can be downloaded from the Apache Avro™ Releases page. This 
guide uses Avro 1.11.4, the latest version at the time of writing. Download and 
unzip avro-1.11.4.tar.gz, and install via python setup.py (this will probably 
require root privileges). Ensure that you can import avro from a Python prompt.
+
+```shell
+$ tar xvf avro-1.11.4.tar.gz
+$ cd avro-1.11.4
+$ python setup.py install
+$ python
+>>> import avro # should not raise ImportError
+```
+
+Alternatively, you may build the Avro Python library from source. From the root Avro directory, run the commands
+
+```shell
+$ cd lang/py/
+$ python3 -m pip install -e .
+$ python
+```
+
+## Defining a schema
+Avro schemas are defined using JSON. Schemas are composed of primitive types 
(null, boolean, int, long, float, double, bytes, and string) and complex types 
(record, enum, array, map, union, and fixed). You can learn more about Avro 
schemas and types from the specification, but for now let's start with a simple 
schema example, user.avsc:
+
+```json
+{"namespace": "example.avro",
+ "type": "record",
+ "name": "User",
+ "fields": [
+     {"name": "name", "type": "string"},
+     {"name": "favorite_number",  "type": ["int", "null"]},
+     {"name": "favorite_color", "type": ["string", "null"]}
+ ]
+}
+```
+      
+This schema defines a record representing a hypothetical user. (Note that a 
schema file can only contain a single schema definition.) At minimum, a record 
definition must include its type ("type": "record"), a name ("name": "User"), 
and fields, in this case name, favorite_number, and favorite_color. We also 
define a namespace ("namespace": "example.avro"), which together with the name 
attribute defines the "full name" of the schema (example.avro.User in this 
case).
+
+Fields are defined via an array of objects, each of which defines a name and 
type (other attributes are optional, see the record specification for more 
details). The type attribute of a field is another schema object, which can be 
either a primitive or complex type. For example, the name field of our User 
schema is the primitive type string, whereas the favorite_number and 
favorite_color fields are both unions, represented by JSON arrays. unions are a 
complex type that can be any of the  [...]
+
+## Serializing and deserializing without code generation
+Data in Avro is always stored with its corresponding schema, meaning we can 
always read a serialized item, regardless of whether we know the schema ahead 
of time. This allows us to perform serialization and deserialization without 
code generation. Note that the Avro Python library does not support code 
generation.
+
+Try running the following code snippet, which serializes two users to a data 
file on disk, and then reads back and deserializes the data file:
+
+```python
+import avro.schema
+from avro.datafile import DataFileReader, DataFileWriter
+from avro.io import DatumReader, DatumWriter
+
+schema = avro.schema.parse(open("user.avsc", "rb").read())
+
+writer = DataFileWriter(open("users.avro", "wb"), DatumWriter(), schema)
+writer.append({"name": "Alyssa", "favorite_number": 256})
+writer.append({"name": "Ben", "favorite_number": 7, "favorite_color": "red"})
+writer.close()
+
+reader = DataFileReader(open("users.avro", "rb"), DatumReader())
+for user in reader:
+    print(user)
+reader.close()
+```
+
+This outputs:
+
+```json
+{u'favorite_color': None, u'favorite_number': 256, u'name': u'Alyssa'}
+{u'favorite_color': u'red', u'favorite_number': 7, u'name': u'Ben'}
+```
+
+Do make sure that you open your files in binary mode (i.e. using the modes wb 
or rb respectively). Otherwise you might generate corrupt files due to 
automatic replacement of newline characters with the platform-specific 
representations.
+
+Let's take a closer look at what's going on here.
+
+```python
+schema = avro.schema.parse(open("user.avsc", "rb").read())
+```
+
+avro.schema.parse takes a string containing a JSON schema definition as input and outputs an avro.schema.Schema object (specifically a subclass of Schema, in this case RecordSchema). We're passing in the contents of our user.avsc schema file here.
+
+```python
+writer = DataFileWriter(open("users.avro", "wb"), DatumWriter(), schema)
+```
+
+We create a DataFileWriter, which we'll use to write serialized items to a 
data file on disk. The DataFileWriter constructor takes three arguments:
+
+* The file we'll serialize to
+* A DatumWriter, which is responsible for actually serializing the items to 
Avro's binary format (DatumWriters can be used separately from DataFileWriters, 
e.g., to perform IPC with Avro).
+* The schema we're using. The DataFileWriter needs the schema both to write the schema to the data file, and to verify that the items we write are valid and contain the appropriate fields.
+
+```python
+writer.append({"name": "Alyssa", "favorite_number": 256})
+writer.append({"name": "Ben", "favorite_number": 7, "favorite_color": "red"})
+```
+     
+We use DataFileWriter.append to add items to our data file. Avro records are represented as Python dicts. Since the field favorite_color has type ["string", "null"], we are not required to specify this field, as shown in the first append. Were we to omit the required name field, an exception would be raised. Any extra entries in the dict that do not correspond to a field are ignored.
+
+```python
+reader = DataFileReader(open("users.avro", "rb"), DatumReader())
+```
+
+We open the file again, this time for reading back from disk. We use a DataFileReader and DatumReader analogous to the DataFileWriter and DatumWriter above.
+
+```python
+for user in reader:
+    print(user)
+```
+
+The DataFileReader is an iterator that returns dicts corresponding to the 
serialized items.
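+
+Since each item is a plain Python dict, individual fields can be read directly. For example, a small sketch reusing the users.avro file written above:
+
+```python
+reader = DataFileReader(open("users.avro", "rb"), DatumReader())
+for user in reader:
+    # favorite_color is None for users that never set it, since its type is ["string", "null"]
+    print(user["name"], user["favorite_number"], user["favorite_color"])
+reader.close()
+```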
diff --git a/doc/content/en/docs/1.11.4/IDL Language/_index.md 
b/doc/content/en/docs/1.11.4/IDL Language/_index.md
new file mode 100644
index 000000000..f50b0a489
--- /dev/null
+++ b/doc/content/en/docs/1.11.4/IDL Language/_index.md 
@@ -0,0 +1,435 @@
+---
+title: "IDL Language"
+linkTitle: "IDL Language"
+weight: 201
+---
+
+<!--
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+   https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+
+-->
+
+## Introduction
+This document defines Avro IDL, a higher-level language for authoring Avro 
schemata. Before reading this document, you should have familiarity with the 
concepts of schemata and protocols, as well as the various primitive and 
complex types available in Avro.
+
+## Overview
+
+### Purpose
+The aim of the Avro IDL language is to enable developers to author schemata in 
a way that feels more similar to common programming languages like Java, C++, 
or Python. Additionally, the Avro IDL language may feel more familiar for those 
users who have previously used the interface description languages (IDLs) in 
other frameworks like Thrift, Protocol Buffers, or CORBA.
+
+### Usage
+Each Avro IDL file defines a single Avro Protocol, and thus generates as its 
output a JSON-format Avro Protocol file with extension .avpr.
+
+To convert a _.avdl_ file into a _.avpr_ file, it may be processed by the 
`idl` tool. For example:
+```shell
+$ java -jar avro-tools.jar idl src/test/idl/input/namespaces.avdl 
/tmp/namespaces.avpr
+$ head /tmp/namespaces.avpr
+{
+  "protocol" : "TestNamespace",
+  "namespace" : "avro.test.protocol",
+```
+The `idl` tool can also process input to and from _stdin_ and _stdout_. See 
`idl --help` for full usage information.
+
+A Maven plugin is also provided to compile .avdl files. To use it, add 
something like the following to your pom.xml:
+```xml
+<build>
+  <plugins>
+    <plugin>
+      <groupId>org.apache.avro</groupId>
+      <artifactId>avro-maven-plugin</artifactId>
+      <executions>
+        <execution>
+          <goals>
+            <goal>idl-protocol</goal>
+          </goals>
+        </execution>
+      </executions>
+    </plugin>
+  </plugins>
+</build>
+```
+
+## Defining a Protocol in Avro IDL
+An Avro IDL file consists of exactly one protocol definition. The minimal 
protocol is defined by the following code:
+```java
+protocol MyProtocol {
+}
+```
+This is equivalent to (and generates) the following JSON protocol definition:
+```json
+{
+"protocol" : "MyProtocol",
+  "types" : [ ],
+  "messages" : {
+  }
+}
+```
+The namespace of the protocol may be changed using the @namespace annotation:
+```java
+@namespace("mynamespace")
+protocol MyProtocol {
+}
+```
+This notation is used throughout Avro IDL as a way of specifying properties 
for the annotated element, as will be described later in this document.
+
+Protocols in Avro IDL can contain the following items:
+
+* Imports of external protocol and schema files.
+* Definitions of named schemata, including records, errors, enums, and fixeds.
+* Definitions of RPC messages
+
+## Imports
+Files may be imported in one of three formats:
+
+* An IDL file may be imported with a statement like:
+
+  `import idl "foo.avdl";`
+
+* A JSON protocol file may be imported with a statement like:
+
+  `import protocol "foo.avpr";`
+
+* A JSON schema file may be imported with a statement like:
+
+  `import schema "foo.avsc";`
+
+Messages and types in the imported file are added to this file's protocol.
+
+Imported file names are resolved relative to the current IDL file.
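+
+For illustration, the three forms can be combined within a single protocol (a sketch; the protocol name and imported file names are hypothetical):
+
+```java
+@namespace("example.imports")
+protocol CombinedImports {
+  import idl "foo.avdl";       // messages and types from another IDL protocol
+  import protocol "foo.avpr";  // messages and types from a JSON protocol file
+  import schema "foo.avsc";    // a single named schema
+}
+```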
+
+## Defining an Enumeration
+Enums are defined in Avro IDL using a syntax similar to C or Java. An Avro 
Enum supports optional default values. In the case that a reader schema is 
unable to recognize a symbol written by the writer, the reader will fall back 
to using the defined default value. This default is only used when an 
incompatible symbol is read. It is not used if the enum field is missing.
+
+Example Writer Enum Definition
+```java
+enum Shapes {
+  SQUARE, TRIANGLE, CIRCLE, OVAL
+}
+```
+Example Reader Enum Definition
+```java
+enum Shapes {
+  SQUARE, TRIANGLE, CIRCLE
+} = CIRCLE;
+```
+In the above example, the reader will use the default value of `CIRCLE` 
whenever reading data written with the `OVAL` symbol of the writer. Also note 
that, unlike the JSON format, anonymous enums cannot be defined.
+
+## Defining a Fixed Length Field
+Fixed fields are defined using the following syntax:
+```
+fixed MD5(16);
+```
+This example defines a fixed-length type called MD5 which contains 16 bytes.
+
+## Defining Records and Errors
+Records are defined in Avro IDL using a syntax similar to a struct definition 
in C:
+```java
+record Employee {
+  string name;
+  boolean active = true;
+  long salary;
+}
+```
+The above example defines a record named “Employee” with three fields.
+
+To define an error, simply use the keyword _error_ instead of _record_. For 
example:
+```java
+error Kaboom {
+  string explanation;
+  int result_code = -1;
+}
+```
+Each field in a record or error consists of a type and a name, optional 
property annotations and an optional default value.
+
+A type reference in Avro IDL must be one of:
+
+* A primitive type
+* A logical type
+* A named schema defined prior to this usage in the same Protocol
+* A complex type (array, map, or union)
+
+### Primitive Types
+The primitive types supported by Avro IDL are the same as those supported by 
Avro's JSON format. This list includes _int_, _long_, _string_, _boolean_, 
_float_, _double_, _null_, and _bytes_.
+
+### Logical Types
+Some of the logical types supported by Avro's JSON format are also supported 
by Avro IDL. The currently supported types are:
+
+* _decimal_ (logical type [decimal]({{< relref "../specification#decimal" >}}))
+* _date_ (logical type [date]({{< relref "../specification#date" >}}))
+* _time_ms_ (logical type [time-millis]({{< relref 
"../specification#time-millisecond-precision" >}}))
+* _timestamp_ms_ (logical type [timestamp-millis]({{< relref 
"../specification#timestamp-millisecond-precision" >}}))
+* _uuid_ (logical type [uuid]({{< relref "../specification#uuid" >}}))
+
+For example:
+```java
+record Job {
+  string jobid;
+  date submitDate;
+  time_ms submitTime;
+  timestamp_ms finishTime;
+  decimal(9,2) finishRatio;
+  uuid pk = "a1a2a3a4-b1b2-c1c2-d1d2-d3d4d5d6d7d8";
+}
+```
+
+Logical types can also be specified via an annotation, which is useful for 
logical types for which a keyword does not exist:
+
+```java
+record Job {
+  string jobid;
+  @logicalType("timestamp-micros")
+  long finishTime;
+}
+```
+
+### References to Named Schemata
+If a named schema has already been defined in the same Avro IDL file, it may 
be referenced by name as if it were a primitive type:
+```java
+record Card {
+  Suit suit; // refers to an enum Suit defined earlier in the same file
+  int number;
+}
+```
+
+### Default Values
+Default values for fields may be optionally specified by using an equals sign 
after the field name followed by a JSON expression indicating the default 
value. This JSON is interpreted as described in the [spec]({{< relref 
"../specification#schema-record" >}}).
+
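+For example (a sketch with hypothetical record and field names):
+
+```java
+record Settings {
+  string theme = "light";                 // JSON string default
+  int maxRetries = 3;                     // JSON number default
+  union { null, string } comment = null;  // must match the first union branch (null here)
+}
+```
+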
+### Complex Types
+
+#### Arrays
+Array types are written in a manner that will seem familiar to C++ or Java 
programmers. An array of any type t is denoted `array<t>`. For example, an 
array of strings is denoted `array<string>`, and a multidimensional array of 
Foo records would be `array<array<Foo>>`.
+
+#### Maps
+Map types are written similarly to array types. A map that contains values of type t is written `map<t>`. As in the JSON schema format, all maps contain `string`-type keys.
+
+#### Unions
+Union types are denoted as `union { typeA, typeB, typeC, ... }`. For example, this record contains a string field that is optional (unioned with null), and a field containing either a precise or an imprecise number:
+```java
+record RecordWithUnion {
+  union { null, string } optionalString;
+  union { decimal(12, 6), float } number;
+}
+```
+Note that the same restrictions apply to Avro IDL unions as apply to unions defined in the JSON format; namely, a union may not contain multiple elements of the same type. Also, fields/parameters that use the union type and have a default parameter must specify a default value of the same type as the **first** union type.
+
+Because it occurs so often, there is a special shorthand to denote a union of 
`null` with another type. In the following snippet, the first three fields have 
identical types:
+
+```java
+record RecordWithUnion {
+  union { null, string } optionalString1 = null;
+  string? optionalString2 = null;
+  string? optionalString3; // No default value
+  string? optionalString4 = "something";
+}
+```
+
+Note that unlike explicit unions, the position of the `null` type is fluid; it 
will be the first or last type depending on the default value (if any). So in 
the example above, all fields are valid.
+
+## Defining RPC Messages
+The syntax to define an RPC message within an Avro IDL protocol is similar to the syntax for a method declaration within a C header file or a Java interface. To define an RPC message _add_, which takes two arguments named _foo_ and _bar_ and returns an _int_, simply include the following definition within the protocol:
+```java
+int add(int foo, int bar = 0);
+```
+Message arguments, like record fields, may specify default values.
+
+To define a message with no response, you may use the alias _void_, equivalent 
to the Avro _null_ type:
+```java
+void logMessage(string message);
+```
+If you have previously defined an error type within the same protocol, you may 
declare that a message can throw this error using the syntax:
+```java
+void goKaboom() throws Kaboom;
+```
+To define a one-way message, use the keyword `oneway` after the parameter 
list, for example:
+```java
+void fireAndForget(string message) oneway;
+```
+
+## Other Language Features
+
+### Comments
+All Java-style comments are supported within an Avro IDL file. Any text following _//_ on a line is ignored, as is any text between _/*_ and _*/_, possibly spanning multiple lines.
+
+Comments that begin with _/**_ are used as the documentation string for the 
type or field definition that follows the comment.
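+
+For example (a sketch with a hypothetical enum):
+
+```java
+/** Documentation for the enum type Suit; this comment becomes its doc string. */
+enum Suit {
+  SPADES, HEARTS, DIAMONDS, CLUBS
+}
+```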
+
+### Escaping Identifiers
+Occasionally, one will need to use a reserved language keyword as an 
identifier. In order to do so, backticks (`) may be used to escape the 
identifier. For example, to define a message with the literal name error, you 
may write:
+```java
+void `error`();
+```
+This syntax is allowed anywhere an identifier is expected.
+
+### Annotations for Ordering and Namespaces
+Java-style annotations may be used to add additional properties to types and 
fields throughout Avro IDL.
+
+For example, to specify the sort order of a field within a record, one may use 
the `@order` annotation before the field name as follows:
+```java
+record MyRecord {
+  string @order("ascending") myAscendingSortField;
+  string @order("descending")  myDescendingField;
+  string @order("ignore") myIgnoredField;
+}
+```
+A field's type (with the exception of type references) may also be preceded by 
annotations, e.g.:
+```java
+record MyRecord {
+  @java-class("java.util.ArrayList") array<string> myStrings;
+}
+```
+This can be used to support Java classes that can be serialized/deserialized via their `toString`/`String constructor`, e.g.:
+```java
+record MyRecord {
+  @java-class("java.math.BigDecimal") string value;
+  @java-key-class("java.io.File") map<string> fileStates;
+  array<@java-class("java.math.BigDecimal") string> weights;
+}
+```
+Similarly, a `@namespace` annotation may be used to modify the namespace when 
defining a named schema. For example:
+```java
+@namespace("org.apache.avro.firstNamespace")
+protocol MyProto {
+  @namespace("org.apache.avro.someOtherNamespace")
+  record Foo {}
+
+  record Bar {}
+}
+```
+will define a protocol in the _firstNamespace_ namespace. The record _Foo_ 
will be defined in _someOtherNamespace_ and _Bar_ will be defined in 
_firstNamespace_ as it inherits its default from its container.
+
+Type and field aliases are specified with the `@aliases` annotation as follows:
+```java
+@aliases(["org.old.OldRecord", "org.ancient.AncientRecord"])
+record MyRecord {
+  string @aliases(["oldField", "ancientField"]) myNewField;
+}
+```
+Some annotations like those listed above are handled specially. All other 
annotations are added as properties to the protocol, message, schema or field.
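+
+For example, in the following sketch the annotation and record name are hypothetical; since @my-property is not one of the specially handled annotations, it would be expected to end up as an ordinary property on the record schema:
+
+```java
+@my-property("some value")
+record AnnotatedRecord {
+  string id;  // "my-property" is stored as a property of AnnotatedRecord, not of this field
+}
+```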
+
+## Complete Example
+The following is an example of an Avro IDL file that shows most of the above 
features:
+```java
+/*
+* Header with license information.
+*/
+
+/**
+ * An example protocol in Avro IDL
+ */
+@namespace("org.apache.avro.test")
+protocol Simple {
+  /** Documentation for the enum type Kind */
+  @aliases(["org.foo.KindOf"])
+  enum Kind {
+    FOO,
+    BAR, // the bar enum value
+    BAZ
+  } = FOO; // For schema evolution purposes, unmatched values do not throw an 
error, but are resolved to FOO.
+
+  /** MD5 hash; good enough to avoid most collisions, and smaller than (for 
example) SHA256. */
+  fixed MD5(16);
+
+  record TestRecord {
+    /** Record name; has no intrinsic order */
+    string @order("ignore") name;
+
+    Kind @order("descending") kind;
+
+    MD5 hash;
+
+    /*
+    Note that 'null' is the first union type. Just like .avsc / .avpr files, 
the default value must be of the first union type.
+    */
+    union { null, MD5 } /** Optional field */ @aliases(["hash"]) nullableHash 
= null;
+
+    array<long> arrayOfLongs;
+  }
+
+  /** Errors are records that can be thrown from a method */
+  error TestError {
+    string message;
+  }
+
+  string hello(string greeting);
+  /** Return what was given. Demonstrates the use of backticks to name 
types/fields/messages/parameters after keywords */
+  TestRecord echo(TestRecord `record`);
+  int add(int arg1, int arg2);
+  bytes echoBytes(bytes data);
+  void `error`() throws TestError;
+  // The oneway keyword forces the method to return null.
+  void ping() oneway;
+}
+```
+Additional examples may be found in the Avro source tree under the 
`src/test/idl/input` directory.
+
+## IDE support
+
+There are several editors and IDEs that support Avro IDL files, usually via 
plugins.
+
+### JetBrains
+
+Apache Avro IDL Schema Support 203.1.2 was released on 9 December 2021.
+
+Features:
+* Syntax Highlighting
+* Code Completion
+* Code Formatting
+* Error Highlighting
+* Inspections & quick fixes
+* JSON schemas for .avpr and .avsc files
+
+It's available via the [JetBrains 
Marketplace](https://plugins.jetbrains.com/plugin/15728-apache-avro-idl-schema-support)
+and on [GitHub](https://github.com/opwvhk/avro-schema-support).
+
+The plugin supports almost all JetBrains products: IntelliJ IDEA, PyCharm, WebStorm, Android Studio, AppCode, GoLand, Rider, CLion, RubyMine, PhpStorm, DataGrip, DataSpell, MPS, Code With Me Guest and JetBrains Client.
+
+Only JetBrains Gateway does not support this plugin directly, but the backend (JetBrains) IDE that it connects to does.
+
+### Eclipse
+
+Avroclipse 0.0.11 was released on 4 December 2019.
+
+Features:
+* Syntax Highlighting
+* Error Highlighting
+* Code Completion
+
+It is available on the [Eclipse 
Marketplace](https://marketplace.eclipse.org/content/avroclipse)
+and [GitHub](https://github.com/dvdkruk/avroclipse).
+
+### Visual Studio Code
+
+avro-idl 0.5.0 was released on 16 June 2021. It provides syntax highlighting.
+
+It is available on the [Visual Studio Marketplace](https://marketplace.visualstudio.com/items?itemName=streetsidesoftware.avro) and on [GitHub](https://github.com/Jason3S/vscode-avro-ext).
+
+### Atom.io
+
+atom-language-avro 0.0.13 was released on 14 August 2015. It provides syntax 
highlighting.
+
+It is available as an [Atom.io package](https://atom.io/packages/atom-language-avro) and on [GitHub](https://github.com/jonesetc/atom-language-avro).
+
+### Vim
+
+A `.avdl` detecting plugin by Gurpreet Atwal on 
[GitHub](https://github.com/gurpreetatwal/vim-avro) (Last change in December 
2016)
+
+[avro-idl.vim](https://github.com/apache/avro/blob/master/share/editors/avro-idl.vim)
 in the Avro repository `share/editors` directory (last change in September 
2010)
+
+Both provide syntax highlighting.
diff --git a/doc/content/en/docs/1.11.4/MapReduce guide/_index.md 
b/doc/content/en/docs/1.11.4/MapReduce guide/_index.md
new file mode 100644
index 000000000..2540ff822
--- /dev/null
+++ b/doc/content/en/docs/1.11.4/MapReduce guide/_index.md      
@@ -0,0 +1,396 @@
+---
+title: "MapReduce guide"
+linkTitle: "MapReduce guide"
+weight: 200
+---
+
+<!--
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+   https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+
+-->
+
+Avro provides a convenient way to represent complex data structures within a 
Hadoop MapReduce job. Avro data can be used as both input to and output from a 
MapReduce job, as well as the intermediate format. The example in this guide 
uses Avro data for all three, but it's possible to mix and match; for instance, 
MapReduce can be used to aggregate a particular field in an Avro record.
+
+This guide assumes basic familiarity with both Hadoop MapReduce and Avro. See 
the [Hadoop documentation](https://hadoop.apache.org/docs/current/) and the 
[Avro getting started guide](./getting-started-java/) for introductions to 
these projects. This guide uses the old MapReduce API 
(`org.apache.hadoop.mapred`) and the new MapReduce API 
(`org.apache.hadoop.mapreduce`).
+
+## Setup
+The code from this guide is included in the Avro docs under 
examples/mr-example. The example is set up as a Maven project that includes the 
necessary Avro and MapReduce dependencies and the Avro Maven plugin for code 
generation, so no external jars are needed to run the example. In particular, 
the POM includes the following dependencies:
+```xml
+<dependency>
+  <groupId>org.apache.avro</groupId>
+  <artifactId>avro</artifactId>
+  <version>1.11.4</version>
+</dependency>
+<dependency>
+  <groupId>org.apache.avro</groupId>
+  <artifactId>avro-mapred</artifactId>
+  <version>1.11.4</version>
+</dependency>
+<dependency>
+  <groupId>org.apache.hadoop</groupId>
+  <artifactId>hadoop-client</artifactId>
+  <version>3.1.2</version>
+</dependency>
+```
+And the following plugin:
+```xml
+<plugin>
+  <groupId>org.apache.avro</groupId>
+  <artifactId>avro-maven-plugin</artifactId>
+  <version>1.11.4</version>
+  <executions>
+    <execution>
+      <phase>generate-sources</phase>
+      <goals>
+        <goal>schema</goal>
+      </goals>
+      <configuration>
+        <sourceDirectory>${project.basedir}/../</sourceDirectory>
+        
<outputDirectory>${project.basedir}/target/generated-sources/</outputDirectory>
+      </configuration>
+    </execution>
+  </executions>
+</plugin>
+```
+
+If you do not configure the *sourceDirectory* and *outputDirectory* 
properties, the defaults will be used. The *sourceDirectory* property defaults 
to *src/main/avro*. The *outputDirectory* property defaults to 
*target/generated-sources*. You can change the paths to match your project 
layout.
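+
+For example, a minimal execution relying on those defaults might look like the following sketch (identical to the plugin declaration above, minus the configuration block):
+
+```xml
+<plugin>
+  <groupId>org.apache.avro</groupId>
+  <artifactId>avro-maven-plugin</artifactId>
+  <version>1.11.4</version>
+  <executions>
+    <execution>
+      <phase>generate-sources</phase>
+      <goals>
+        <goal>schema</goal>
+      </goals>
+      <!-- No <configuration>: schemas are read from src/main/avro and
+           generated sources are written to target/generated-sources -->
+    </execution>
+  </executions>
+</plugin>
+```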
+
+Alternatively, Avro jars can be downloaded directly from the Apache Avro™ 
Releases [page](https://avro.apache.org/releases.html). The relevant Avro jars 
for this guide are *avro-1.11.4.jar* and *avro-mapred-1.11.4.jar*, as well as 
*avro-tools-1.11.4.jar* for code generation and viewing Avro data files as 
JSON. In addition, you will need to install Hadoop in order to use MapReduce.
+
+## Example: ColorCount
+Below is a simple example of a MapReduce that uses Avro. There is an example 
for both the old (org.apache.hadoop.mapred) and new 
(org.apache.hadoop.mapreduce) APIs under 
*examples/mr-example/src/main/java/example/*. _MapredColorCount_ is the example 
for the older mapred API while _MapReduceColorCount_ is the example for the 
newer mapreduce API. Both examples are below, but we will detail the mapred API 
in our subsequent examples.
+
+MapredColorCount.java:
+```java
+package example;
+
+import java.io.IOException;
+
+import org.apache.avro.*;
+import org.apache.avro.Schema.Type;
+import org.apache.avro.mapred.*;
+import org.apache.hadoop.conf.*;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.*;
+import org.apache.hadoop.util.*;
+
+import example.avro.User;
+
+public class MapredColorCount extends Configured implements Tool {
+
+  public static class ColorCountMapper extends AvroMapper<User, 
Pair<CharSequence, Integer>> {
+    @Override
+    public void map(User user, AvroCollector<Pair<CharSequence, Integer>> 
collector, Reporter reporter)
+        throws IOException {
+      CharSequence color = user.getFavoriteColor();
+      // We need this check because the User.favorite_color field has type 
["string", "null"]
+      if (color == null) {
+        color = "none";
+      }
+      collector.collect(new Pair<CharSequence, Integer>(color, 1));
+    }
+  }
+
+  public static class ColorCountReducer extends AvroReducer<CharSequence, 
Integer,
+                                                            Pair<CharSequence, 
Integer>> {
+    @Override
+    public void reduce(CharSequence key, Iterable<Integer> values,
+                       AvroCollector<Pair<CharSequence, Integer>> collector,
+                       Reporter reporter)
+        throws IOException {
+      int sum = 0;
+      for (Integer value : values) {
+        sum += value;
+      }
+      collector.collect(new Pair<CharSequence, Integer>(key, sum));
+    }
+  }
+
+  public int run(String[] args) throws Exception {
+    if (args.length != 2) {
+      System.err.println("Usage: MapredColorCount <input path> <output path>");
+      return -1;
+    }
+
+    JobConf conf = new JobConf(getConf(), MapredColorCount.class);
+    conf.setJobName("colorcount");
+
+    FileInputFormat.setInputPaths(conf, new Path(args[0]));
+    FileOutputFormat.setOutputPath(conf, new Path(args[1]));
+
+    AvroJob.setMapperClass(conf, ColorCountMapper.class);
+    AvroJob.setReducerClass(conf, ColorCountReducer.class);
+
+    // Note that AvroJob.setInputSchema and AvroJob.setOutputSchema set
+    // relevant config options such as input/output format, map output
+    // classes, and output key class.
+    AvroJob.setInputSchema(conf, User.getClassSchema());
+    AvroJob.setOutputSchema(conf, 
Pair.getPairSchema(Schema.create(Type.STRING),
+        Schema.create(Type.INT)));
+
+    JobClient.runJob(conf);
+    return 0;
+  }
+
+  public static void main(String[] args) throws Exception {
+    int res = ToolRunner.run(new Configuration(), new MapredColorCount(), 
args);
+    System.exit(res);
+  }
+}
+```
+
+MapReduceColorCount.java:
+```java
+package example;
+
+import java.io.IOException;
+
+import org.apache.avro.Schema;
+import org.apache.avro.mapred.AvroKey;
+import org.apache.avro.mapred.AvroValue;
+import org.apache.avro.mapreduce.AvroJob;
+import org.apache.avro.mapreduce.AvroKeyInputFormat;
+import org.apache.avro.mapreduce.AvroKeyValueOutputFormat;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.Reducer;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+import example.avro.User;
+
+public class MapReduceColorCount extends Configured implements Tool {
+
+  public static class ColorCountMapper extends
+      Mapper<AvroKey<User>, NullWritable, Text, IntWritable> {
+
+    @Override
+    public void map(AvroKey<User> key, NullWritable value, Context context)
+        throws IOException, InterruptedException {
+
+      CharSequence color = key.datum().getFavoriteColor();
+      if (color == null) {
+        color = "none";
+      }
+      context.write(new Text(color.toString()), new IntWritable(1));
+    }
+  }
+
+  public static class ColorCountReducer extends
+      Reducer<Text, IntWritable, AvroKey<CharSequence>, AvroValue<Integer>> {
+
+    @Override
+    public void reduce(Text key, Iterable<IntWritable> values,
+        Context context) throws IOException, InterruptedException {
+
+      int sum = 0;
+      for (IntWritable value : values) {
+        sum += value.get();
+      }
+      context.write(new AvroKey<CharSequence>(key.toString()), new 
AvroValue<Integer>(sum));
+    }
+  }
+
+  public int run(String[] args) throws Exception {
+    if (args.length != 2) {
+      System.err.println("Usage: MapReduceColorCount <input path> <output 
path>");
+      return -1;
+    }
+
+    Job job = new Job(getConf());
+    job.setJarByClass(MapReduceColorCount.class);
+    job.setJobName("Color Count");
+
+    FileInputFormat.setInputPaths(job, new Path(args[0]));
+    FileOutputFormat.setOutputPath(job, new Path(args[1]));
+
+    job.setInputFormatClass(AvroKeyInputFormat.class);
+    job.setMapperClass(ColorCountMapper.class);
+    AvroJob.setInputKeySchema(job, User.getClassSchema());
+    job.setMapOutputKeyClass(Text.class);
+    job.setMapOutputValueClass(IntWritable.class);
+
+    job.setOutputFormatClass(AvroKeyValueOutputFormat.class);
+    job.setReducerClass(ColorCountReducer.class);
+    AvroJob.setOutputKeySchema(job, Schema.create(Schema.Type.STRING));
+    AvroJob.setOutputValueSchema(job, Schema.create(Schema.Type.INT));
+
+    return (job.waitForCompletion(true) ? 0 : 1);
+  }
+
+  public static void main(String[] args) throws Exception {
+    int res = ToolRunner.run(new MapReduceColorCount(), args);
+    System.exit(res);
+  }
+}
+```
+ColorCount reads in data files containing *User* records, defined in _examples/user.avsc_, and counts the number of instances of each favorite color. (This example draws inspiration from the canonical _WordCount_ MapReduce application.) MapredColorCount.java uses the old MapReduce API (`org.apache.hadoop.mapred`), while MapReduceColorCount.java uses the new API (`org.apache.hadoop.mapreduce`). See also MapReduceAvroWordCount, found under _doc/examples/mr-example/src/main/java/example/_, for another example that uses the new MapReduce API. The User schema is defined as follows:
+```json
+{"namespace": "example.avro",
+ "type": "record",
+ "name": "User",
+ "fields": [
+     {"name": "name", "type": "string"},
+     {"name": "favorite_number",  "type": ["int", "null"]},
+     {"name": "favorite_color", "type": ["string", "null"]}
+ ]
+}
+```
+This schema is compiled into the *User* class used by *ColorCount* via the 
Avro Maven plugin (see _examples/mr-example/pom.xml_ for how this is set up).
+
+*ColorCountMapper* essentially takes a *User* as input and extracts the User's 
favorite color, emitting the key-value pair `<favoriteColor, 1>`. 
_ColorCountReducer_ then adds up how many occurrences of a particular favorite 
color were emitted, and outputs the result as a Pair record. These Pairs are 
serialized to an Avro data file.
+
+## Running ColorCount
+The _ColorCount_ application is provided as a Maven project in the Avro docs 
under _examples/mr-example_. To build the project, including the code 
generation of the User schema, run:
+```shell
+mvn compile
+```
+Next, run _GenerateData_ from `examples/mr-example` to create an Avro data file, `input/users.avro`, containing 20 Users with favorite colors chosen randomly from a list:
+```shell
+mvn exec:java -q -Dexec.mainClass=example.GenerateData
+```
+Besides creating the data file, GenerateData prints the JSON representations 
of the Users generated to stdout, for example:
+```json
+{"name": "user", "favorite_number": null, "favorite_color": "red"}
+{"name": "user", "favorite_number": null, "favorite_color": "green"}
+{"name": "user", "favorite_number": null, "favorite_color": "purple"}
+{"name": "user", "favorite_number": null, "favorite_color": null}
+...
+``` 
+Now we're ready to run ColorCount. We specify our freshly-generated input 
folder as the input path and output as our output folder (note that MapReduce 
will not start a job if the output folder already exists):
+```shell
+mvn exec:java -q -Dexec.mainClass=example.MapredColorCount -Dexec.args="input 
output"
+```
+Once ColorCount completes, checking the contents of the new output directory 
should yield the following:
+```shell
+$ ls output/
+part-00000.avro  _SUCCESS
+```
+You can check the contents of the generated Avro file using the avro-tools jar:
+```shell
+$ java -jar /path/to/avro-tools-1.11.4.jar tojson output/part-00000.avro
+{"value": 3, "key": "blue"}
+{"value": 7, "key": "green"}
+{"value": 1, "key": "none"}
+{"value": 2, "key": "orange"}
+{"value": 3, "key": "purple"}
+{"value": 2, "key": "red"}
+{"value": 2, "key": "yellow"}
+```
+Now let's go over the ColorCount example in detail.
+
+## AvroMapper - org.apache.hadoop.mapred API
+
+The easiest way to use Avro data files as input to a MapReduce job is to subclass `AvroMapper`. An `AvroMapper` defines a `map` function that takes an Avro datum as input and outputs a key/value pair represented as a Pair record. In the ColorCount example, ColorCountMapper is an AvroMapper that takes a User as input and outputs a `Pair<CharSequence, Integer>`, where the CharSequence key is the user's favorite color and the Integer value is 1.
+```java
+public static class ColorCountMapper extends AvroMapper<User, 
Pair<CharSequence, Integer>> {
+  @Override
+  public void map(User user, AvroCollector<Pair<CharSequence, Integer>> 
collector, Reporter reporter)
+      throws IOException {
+    CharSequence color = user.getFavoriteColor();
+    // We need this check because the User.favorite_color field has type 
["string", "null"]
+    if (color == null) {
+      color = "none";
+    }
+    collector.collect(new Pair<CharSequence, Integer>(color, 1));
+  }
+}
+```
+In order to use our AvroMapper, we must call AvroJob.setMapperClass and 
AvroJob.setInputSchema.
+```java
+AvroJob.setMapperClass(conf, ColorCountMapper.class);
+AvroJob.setInputSchema(conf, User.getClassSchema());
+```
+Note that `AvroMapper` does not implement the `Mapper` interface. Under the 
hood, the specified Avro data files are deserialized into AvroWrappers 
containing the actual data, which are processed by a Mapper that calls the 
configured AvroMapper's map function. AvroJob.setInputSchema sets up the 
relevant configuration parameters needed to make this happen, thus you should 
not need to call `JobConf.setMapperClass`, `JobConf.setInputFormat`, 
`JobConf.setMapOutputKeyClass`, `JobConf.setMapOut [...]
+
+## Mapper - org.apache.hadoop.mapreduce API
+This document will not go into all the differences between the mapred and mapreduce APIs; however, it will describe the main differences. As you can see, ColorCountMapper is now a subclass of the Hadoop Mapper class and is passed an AvroKey as its key. Additionally, the AvroJob method calls were slightly changed.
+```java
+  public static class ColorCountMapper extends
+      Mapper<AvroKey<User>, NullWritable, Text, IntWritable> {
+
+    @Override
+    public void map(AvroKey<User> key, NullWritable value, Context context)
+        throws IOException, InterruptedException {
+
+      CharSequence color = key.datum().getFavoriteColor();
+      if (color == null) {
+        color = "none";
+      }
+      context.write(new Text(color.toString()), new IntWritable(1));
+    }
+  }
+```
+
+## AvroReducer - org.apache.hadoop.mapred API
+Analogously to AvroMapper, an AvroReducer defines a `reduce` function that takes the key/value types output by an AvroMapper (or any mapper that outputs Pairs) and outputs a key/value pair represented as a Pair record. In the ColorCount example, ColorCountReducer is an AvroReducer that takes the CharSequence key representing a favorite color and the `Iterable<Integer>` representing the counts for that color (they should all be 1 in this example) and adds up the counts.
+```java
+public static class ColorCountReducer extends AvroReducer<CharSequence, 
Integer,
+                                                          Pair<CharSequence, 
Integer>> {
+  @Override
+  public void reduce(CharSequence key, Iterable<Integer> values,
+                     AvroCollector<Pair<CharSequence, Integer>> collector,
+                     Reporter reporter)
+      throws IOException {
+    int sum = 0;
+    for (Integer value : values) {
+      sum += value;
+    }
+    collector.collect(new Pair<CharSequence, Integer>(key, sum));
+  }
+}
+```
+In order to use our AvroReducer, we must call AvroJob.setReducerClass and 
AvroJob.setOutputSchema.
+```java
+AvroJob.setReducerClass(conf, ColorCountReducer.class);
+AvroJob.setOutputSchema(conf, Pair.getPairSchema(Schema.create(Type.STRING),
+                                                 Schema.create(Type.INT)));
+```
+Note that _AvroReducer_ does not implement the _Reducer_ interface. The 
intermediate Pairs output by the mapper are split into _AvroKeys_ and 
_AvroValues_, which are processed by a Reducer that calls the configured 
AvroReducer's `reduce` function. `AvroJob.setOutputSchema` sets up the relevant 
configuration parameters needed to make this happen, thus you should not need 
to call `JobConf.setReducerClass`, `JobConf.setOutputFormat`, 
`JobConf.setOutputKeyClass`, `JobConf.setMapOutputKeyClas [...]
+
+## Reduce - org.apache.hadoop.mapreduce API
+As before, we will not detail every difference between the APIs. As with the _Mapper_ change, _ColorCountReducer_ is now a subclass of _Reducer_, and _AvroKey_ and _AvroValue_ are emitted. Additionally, the _AvroJob_ method calls were slightly changed.
+```java
+  public static class ColorCountReducer extends
+      Reducer<Text, IntWritable, AvroKey<CharSequence>, AvroValue<Integer>> {
+
+    @Override
+    public void reduce(Text key, Iterable<IntWritable> values,
+        Context context) throws IOException, InterruptedException {
+
+      int sum = 0;
+      for (IntWritable value : values) {
+        sum += value.get();
+      }
+      context.write(new AvroKey<CharSequence>(key.toString()), new 
AvroValue<Integer>(sum));
+    }
+  }
+```
+
+## Learning more
+The mapred API allows users to mix Avro AvroMappers and AvroReducers with non-Avro Mappers and Reducers, and the mapreduce API allows users to input Avro and output non-Avro data, or vice versa.
+
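+For instance, a job that reads Avro input but writes ordinary text output with the mapreduce API could be configured roughly as in the sketch below. This fragment is illustrative only and is not part of the ColorCount sources; it reuses the ColorCountMapper and User classes shown above and pairs them with Hadoop's stock TextOutputFormat and IntSumReducer classes.
+```java
+// Illustrative sketch: Avro input, non-Avro (plain text) output with the mapreduce API.
+Job job = new Job(getConf());
+job.setJarByClass(MapReduceColorCount.class);
+job.setJobName("Avro in, text out");
+
+// Input side is Avro: the mapper receives AvroKey<User> records.
+job.setInputFormatClass(AvroKeyInputFormat.class);
+AvroJob.setInputKeySchema(job, User.getClassSchema());
+job.setMapperClass(ColorCountMapper.class);
+job.setMapOutputKeyClass(Text.class);
+job.setMapOutputValueClass(IntWritable.class);
+
+// Output side is plain Hadoop: an ordinary reducer and text output, no Avro involved.
+job.setOutputFormatClass(TextOutputFormat.class);
+job.setReducerClass(IntSumReducer.class); // org.apache.hadoop.mapreduce.lib.reduce.IntSumReducer
+job.setOutputKeyClass(Text.class);
+job.setOutputValueClass(IntWritable.class);
+```
+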
+The mapred package has API documentation for `org.apache.avro.mapred`, as does the `org.apache.avro.mapreduce` package for the new MapReduce API (`org.apache.hadoop.mapreduce`). Similarly to the mapreduce package, it's possible with the mapred API to implement your own Mappers and Reducers directly using the public classes provided in these libraries. See the `AvroWordCount` application, found under _examples/mr-example/src/main/java/example/AvroWordCount.java_ in the Avro documentation, for an example o [...]
diff --git a/doc/content/en/docs/1.11.4/SASL profile/_index.md 
b/doc/content/en/docs/1.11.4/SASL profile/_index.md
new file mode 100644
index 000000000..67c316e22
--- /dev/null
+++ b/doc/content/en/docs/1.11.4/SASL profile/_index.md 
@@ -0,0 +1,93 @@
+---
+title: "SASL profile"
+linkTitle: "SASL profile"
+weight: 202
+---
+
+<!--
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+   https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+
+-->
+
+## Introduction
+SASL ([RFC 2222](https://www.ietf.org/rfc/rfc2222.txt)) provides a framework 
for authentication and security of network protocols. Each protocol that uses 
SASL is meant to define a SASL profile. This document provides a SASL profile 
for connection-based Avro RPC.
+
+## Overview
+SASL negotiation proceeds as a series of message interactions over a 
connection between a client and server using a selected SASL mechanism. The 
client starts this negotiation by sending its chosen mechanism name with an 
initial (possibly empty) message. Negotiation proceeds with the exchange of 
messages until either side indicates success or failure. The content of the 
messages is mechanism-specific. If the negotiation succeeds, then the session 
can proceed over the connection, otherwis [...]
+
+Some mechanisms continue to process session data after negotiation (e.g., encrypting it), while some specify that further session data is transmitted unmodified.
+
+## Negotiation
+
+### Commands
+Avro SASL negotiation uses four one-byte commands.
+
+* 0: START Used in a client's initial message.
+* 1: CONTINUE Used while negotiation is ongoing.
+* 2: FAIL Terminates negotiation unsuccessfully.
+* 3: COMPLETE Terminates negotiation successfully.
+
+The format of a START message is:
+
+`| 0 | 4-byte mechanism name length | mechanism name | 4-byte payload length | 
payload data |`
+
+The format of a CONTINUE message is:
+
+`| 1 | 4-byte payload length | payload data |`
+
+The format of a FAIL message is:
+
+`| 2 | 4-byte message length | UTF-8 message |`
+
+The format of a COMPLETE message is:
+
+`| 3 | 4-byte payload length | payload data |`
+
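+As a rough illustration of these layouts (not part of the profile itself, and assuming the 4-byte lengths are written big-endian, as in Avro's message framing), a START message could be assembled like this:
+```java
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+
+public class SaslStartFrame {
+  static byte[] startMessage(String mechanism, byte[] initialPayload) throws IOException {
+    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+    DataOutputStream out = new DataOutputStream(bytes);
+    out.writeByte(0);                           // START command
+    byte[] name = mechanism.getBytes(StandardCharsets.UTF_8);
+    out.writeInt(name.length);                  // 4-byte mechanism name length
+    out.write(name);                            // mechanism name
+    out.writeInt(initialPayload.length);        // 4-byte payload length
+    out.write(initialPayload);                  // payload data
+    return bytes.toByteArray();
+  }
+
+  public static void main(String[] args) throws IOException {
+    // For the ANONYMOUS mechanism with an empty payload this produces
+    // | 0 | 0009 | ANONYMOUS | 0000 |
+    byte[] frame = startMessage("ANONYMOUS", new byte[0]);
+    System.out.println(frame.length + " bytes");
+  }
+}
+```
+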
+### Process
+Negotiation is initiated by a client sending a START command containing the 
client's chosen mechanism name and any mechanism-specific payload data.
+
+The server and client then interchange some number (possibly zero) of CONTINUE 
messages. Each message contains payload data that is processed by the security 
mechanism to generate the next message.
+
+Once either the client or server sends a FAIL message, negotiation has failed. UTF-8-encoded text is included in the failure message. Once either a FAIL message has been sent or received, or any other error occurs in the negotiation, further communication on this connection must cease.
+
+Once either the client or server sends a COMPLETE message, negotiation has completed successfully. Session data may now be transmitted over the connection until it is closed by either side.
+
+## Session Data
+If no SASL QOP (quality of protection) is negotiated, then all subsequent 
writes to/reads over this connection are written/read unmodified. In 
particular, messages use Avro [framing](#Message+Framing), and are of the form:
+
+`| 4-byte frame length | frame data | ... | 4 zero bytes |`
+
+If a SASL QOP is negotiated, then it must be used by the connection for all 
subsequent messages. This is done by wrapping each non-empty frame written 
using the security mechanism and unwrapping each non-empty frame read. The 
length written in each non-empty frame is the length of the wrapped data. 
Complete frames must be passed to the security mechanism for unwrapping. 
Unwrapped data is then passed to the application as the content of the frame.
+
+If at any point processing fails due to wrapping, unwrapping or framing 
errors, then all further communication on this connection must cease.
+
+## Anonymous Mechanism
+The SASL anonymous mechanism ([RFC 2245](https://www.ietf.org/rfc/rfc2245.txt)) is quite simple to implement. In particular, an initial anonymous request may be prefixed by the following static sequence:
+
+`| 0 | 0009 | ANONYMOUS | 0000 |`
+
+If a server uses the anonymous mechanism, it should check that the mechanism 
name in the start message prefixing the first request received is 'ANONYMOUS', 
then simply prefix its initial response with a COMPLETE message of:
+
+`| 3 | 0000 |`
+
+If an anonymous server receives some other mechanism name, then it may respond with a FAIL message as simple as:
+
+`| 2 | 0000 |`
+
+Note that the anonymous mechanism adds no additional round-trip messages between client and server. The START message can be piggybacked on the initial request and the COMPLETE or FAIL message can be piggybacked on the initial response.
diff --git a/doc/content/en/docs/1.11.4/Specification/_index.md 
b/doc/content/en/docs/1.11.4/Specification/_index.md
new file mode 100755
index 000000000..7cc5a1754
--- /dev/null
+++ b/doc/content/en/docs/1.11.4/Specification/_index.md
@@ -0,0 +1,848 @@
+---
+title: "Specification"
+linkTitle: "Specification"
+weight: 4
+date: 2021-10-25
+aliases:
+- spec.html
+---
+
+<!--
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+   https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+
+-->
+
+## Introduction
+This document defines Apache Avro. It is intended to be the authoritative 
specification. Implementations of Avro must adhere to this document.
+
+## Schema Declaration {#schema-declaration}
+A Schema is represented in [JSON](https://www.json.org/) by one of:
+
+* A JSON string, naming a defined type.
+* A JSON object, of the form:
+```js
+{"type": "typeName", ...attributes...}
+```
+where _typeName_ is either a primitive or derived type name, as defined below. 
Attributes not defined in this document are permitted as metadata, but must not 
affect the format of serialized data.
+* A JSON array, representing a union of embedded types.
+
+## Primitive Types
+The set of primitive type names is:
+
+* _null_: no value
+* _boolean_: a binary value
+* _int_: 32-bit signed integer
+* _long_: 64-bit signed integer
+* _float_: single precision (32-bit) IEEE 754 floating-point number
+* _double_: double precision (64-bit) IEEE 754 floating-point number
+* _bytes_: sequence of 8-bit unsigned bytes
+* _string_: unicode character sequence
+
+Primitive types have no specified attributes.
+
+Primitive type names are also defined type names. Thus, for example, the 
schema "string" is equivalent to:
+```json
+{"type": "string"}
+```
+
+## Complex Types
+Avro supports six kinds of complex types: _records_, _enums_, _arrays_, 
_maps_, _unions_ and _fixed_.
+
+### Records {#schema-record}
+Records use the type name "record" and support the following attributes:
+
+* _name_: a JSON string providing the name of the record (required).
+* _namespace_: a JSON string that qualifies the name (optional).
+* _doc_: a JSON string providing documentation to the user of this schema 
(optional).
+* _aliases_: a JSON array of strings, providing alternate names for this 
record (optional).
+* _fields_: a JSON array, listing fields (required). Each field is a JSON 
object with the following attributes:
+  * _name_: a JSON string providing the name of the field (required).
+  * _doc_: a JSON string describing this field for users (optional).
+  * _type_: a [schema]({{< ref "#schema-declaration" >}} "Schema 
declaration"), as defined above
+  * _order_: specifies how this field impacts sort ordering of this record 
(optional). Valid values are "ascending" (the default), "descending", or 
"ignore". For more details on how this is used, see the sort order section 
below.
+  * _aliases_: a JSON array of strings, providing alternate names for this 
field (optional).
+  * _default_: A default value for this field, only used when reading 
instances that lack the field for schema evolution purposes. The presence of a 
default value does not make the field optional at encoding time. Permitted 
values depend on the field's schema type, according to the table below. Default 
values for union fields correspond to the first schema in the union. Default 
values for bytes and fixed fields are JSON strings, where Unicode code points 
0-255 are mapped to unsigned 8-bi [...]
+
+*field default values*
+
+| **avro type** | **json type**  | **example** |
+|---------------|----------------|-------------|
+| null          | null           | `null`      |
+| boolean       | boolean        | `true`      |
+| int,long      | integer        | `1`         |
+| float,double  | number         | `1.1`       |
+| bytes         | string         | `"\u00FF"`  |
+| string        | string         | `"foo"`     |
+| record        | object         | `{"a": 1}`  |
+| enum          | string         | `"FOO"`     |
+| array         | array          | `[1]`       |
+| map           | object         | `{"a": 1}`  |
+| fixed         | string         | `"\u00ff"`  |
+
+For example, a linked-list of 64-bit values may be defined with:
+```jsonc
+{
+  "type": "record",
+  "name": "LongList",
+  "aliases": ["LinkedLongs"],                      // old name for this
+  "fields" : [
+    {"name": "value", "type": "long"},             // each element has a long
+    {"name": "next", "type": ["null", "LongList"]} // optional next element
+  ]
+}
+```
+         
+### Enums
+Enums use the type name "enum" and support the following attributes:
+
+* _name_: a JSON string providing the name of the enum (required).
+* _namespace_: a JSON string that qualifies the name (optional).
+* _aliases_: a JSON array of strings, providing alternate names for this enum 
(optional).
+* _doc_: a JSON string providing documentation to the user of this schema 
(optional).
+* _symbols_: a JSON array, listing symbols, as JSON strings (required). All 
symbols in an enum must be unique; duplicates are prohibited. Every symbol must 
match the regular expression [A-Za-z_][A-Za-z0-9_]* (the same requirement as 
for [names]({{< ref "#names" >}} "Names")).
+* _default_: A default value for this enumeration, used during resolution when 
the reader encounters a symbol from the writer that isn't defined in the 
reader's schema (optional). The value provided here must be a JSON string 
that's a member of the symbols array. See documentation on schema resolution 
for how this gets used.
+
+For example, playing card suits might be defined with:
+```json
+{
+  "type": "enum",
+  "name": "Suit",
+  "symbols" : ["SPADES", "HEARTS", "DIAMONDS", "CLUBS"]
+}
+```
+         
+### Arrays
+Arrays use the type name "array" and support a single attribute:
+
+* _items_: the schema of the array's items.
+
+For example, an array of strings is declared with:
+```json
+{
+  "type": "array",
+  "items" : "string",
+  "default": []
+}
+```
+    
+### Maps
+Maps use the type name "map" and support one attribute:
+
+* _values_: the schema of the map's values.
+
+Map keys are assumed to be strings.
+
+For example, a map from string to long is declared with:
+```json
+{
+  "type": "map",
+  "values" : "long",
+  "default": {}
+}
+```
+    
+### Unions
+Unions, as mentioned above, are represented using JSON arrays. For example, 
`["null", "string"]` declares a schema which may be either a null or string.
+
+(Note that when a [default value]({{< ref "#schema-record" >}} "Schema 
record") is specified for a record field whose type is a union, the type of the 
default value must match the first element of the union. Thus, for unions 
containing "null", the "null" is usually listed first, since the default value 
of such unions is typically null.)
+
+Unions may not contain more than one schema with the same type, except for the 
named types record, fixed and enum. For example, unions containing two array 
types or two map types are not permitted, but two types with different names 
are permitted. (Names permit efficient resolution when reading and writing 
unions.)
+
+Unions may not immediately contain other unions.
+
+### Fixed
+Fixed uses the type name "fixed" and supports the following attributes:
+
+* _name_: a string naming this fixed (required).
+* _namespace_: a string that qualifies the name (optional).
+* _aliases_: a JSON array of strings, providing alternate names for this fixed type (optional).
+* _size_: an integer, specifying the number of bytes per value (required).
+
+For example, a 16-byte quantity may be declared with:
+```json
+{"type": "fixed", "size": 16, "name": "md5"}
+```
+
+### Names {#names}
+Records, enums and fixed are named types. Each has a fullname that is composed of two parts: a name and a namespace, separated by a dot. Equality of names is defined on the fullname.
+
+Record fields and enum symbols have names as well (but no namespace). Equality 
of fields and enum symbols is defined on the name of the field/symbol within 
its scope (the record/enum that defines it). Fields and enum symbols across 
scopes are never equal.
+
+The name portion of the fullname of named types, record field names, and enum 
symbols must:
+
+* start with [A-Za-z_]
+* subsequently contain only [A-Za-z0-9_]
+
+A namespace is a dot-separated sequence of such names. The empty string may 
also be used as a namespace to indicate the null namespace. Equality of names 
(including field names and enum symbols) as well as fullnames is case-sensitive.
+
+The null namespace may not be used in a dot-separated sequence of names. So 
the grammar for a namespace is:
+```
+  <empty> | <name>[(<dot><name>)*]
+```
+
+In record, enum and fixed definitions, the fullname is determined according to 
the algorithm below the example:
+
+```
+{
+  "type": "record",
+  "name": "Example",
+  "doc": "A simple name (attribute) and no namespace attribute: use the null 
namespace (\"\"); the fullname is 'Example'.",
+  "fields": [
+    {
+      "name": "inheritNull",
+      "type": {
+        "type": "enum",
+        "name": "Simple",
+        "doc": "A simple name (attribute) and no namespace attribute: inherit 
the null namespace of the enclosing type 'Example'. The fullname is 'Simple'.",
+        "symbols": ["a", "b"]
+      }
+    }, {
+      "name": "explicitNamespace",
+      "type": {
+        "type": "fixed",
+        "name": "Simple",
+        "namespace": "explicit",
+        "doc": "A simple name (attribute) and a namespace (attribute); the 
fullname is 'explicit.Simple' (this is a different type than of the 
'inheritNull' field).",
+        "size": 12
+      }
+    }, {
+      "name": "fullName",
+      "type": {
+        "type": "record",
+        "name": "a.full.Name",
+        "namespace": "ignored",
+        "doc": "A name attribute with a fullname, so the namespace attribute 
is ignored. The fullname is 'a.full.Name', and the namespace is 'a.full'.",
+        "fields": [
+          {
+            "name": "inheritNamespace",
+            "type": {
+              "type": "enum",
+              "name": "Understanding",
+              "doc": "A simple name (attribute) and no namespace attribute: 
inherit the namespace of the enclosing type 'a.full.Name'. The fullname is 
'a.full.Understanding'.",
+              "symbols": ["d", "e"]
+            }
+          }
+        ]
+      }
+    }
+  ]
+}
+```
+
+The fullname of a record, enum or fixed definition is determined by the 
required `name` and optional `namespace` attributes like this:
+
+* A fullname is specified. If the name specified contains a dot, then it is 
assumed to be a fullname, and any namespace also specified is ignored. For 
example, use "name": "org.foo.X" to indicate the fullname org.foo.X.
+* A simple name (a name that contains no dots) and namespace are both 
specified. For example, one might use "name": "X", "namespace": "org.foo" to 
indicate the fullname org.foo.X.
+* A simple name only is specified (a name that contains no dots). In this case 
the namespace is taken from the most tightly enclosing named schema or 
protocol, and the fullname is constructed from that namespace and the name. For 
example, if "name": "X" is specified, and this occurs within a field of the 
record definition of org.foo.Y, then the fullname is org.foo.X. This also 
happens if there is no enclosing namespace (i.e., the enclosing schema 
definition has the null namespace).
+
+References to previously defined names are as in the latter two cases above: 
if they contain a dot they are a fullname, if they do not contain a dot, the 
namespace is the namespace of the enclosing definition.
+
+Primitive type names (`null`, `boolean`, `int`, `long`, `float`, `double`, 
`bytes`, `string`) have no namespace and their names may not be defined in any 
namespace.
+
+Complex types (`record`, `enum`, `array`, `map`, `fixed`) have no namespace, 
but their names (as well as `union`) are permitted to be reused as type names.  
This can be confusing to the human reader, but is always unambiguous for binary 
serialization.  Due to the limitations of JSON encoding, it is a best practice 
to use a namespace when using these names.
+
+A schema or protocol may not contain multiple definitions of a fullname. 
Further, a name must be defined before it is used ("before" in the depth-first, 
left-to-right traversal of the JSON parse tree, where the types attribute of a 
protocol is always deemed to come "before" the messages attribute.)
+
+### Aliases
+Named types and fields may have aliases. An implementation may optionally use 
aliases to map a writer's schema to the reader's. This facilitates both schema 
evolution as well as processing disparate datasets.
+
+Aliases function by re-writing the writer's schema using aliases from the 
reader's schema. For example, if the writer's schema was named "Foo" and the 
reader's schema is named "Bar" and has an alias of "Foo", then the 
implementation would act as though "Foo" were named "Bar" when reading. 
Similarly, if data was written as a record with a field named "x" and is read 
as a record with a field named "y" with alias "x", then the implementation 
would act as though "x" were named "y" when reading.
+
+A type alias may be specified either as a fully namespace-qualified name, or relative to the namespace of the name it is an alias for. For example, if a type named "a.b" has aliases of "c" and "x.y", then the fully qualified names of its aliases are "a.c" and "x.y".
+
+## Data Serialization and Deserialization
+Binary encoded Avro data does not include type information or field names. The 
benefit is that the serialized data is small, but as a result a schema must 
always be used in order to read Avro data correctly. The best way to ensure 
that the schema is structurally identical to the one used to write the data is 
to use the exact same schema.
+
+Therefore, files or systems that store Avro data should always include the 
writer's schema for that data. Avro-based remote procedure call (RPC) systems 
must also guarantee that remote recipients of data have a copy of the schema 
used to write that data. In general, it is advisable that any reader of Avro 
data should use a schema that is the same (as defined more fully in [Parsing 
Canonical Form for Schemas]({{< ref "#parsing-canonical-form-for-schemas" >}} 
"Parsing Canonical Form for Sc [...]
+
+In general, both serialization and deserialization proceed as a depth-first, 
left-to-right traversal of the schema, serializing or deserializing primitive 
types as they are encountered. Therefore, it is possible, though not advisable, 
to read Avro data with a schema that does not have the same Parsing Canonical 
Form as the schema with which the data was written. In order for this to work, 
the serialized primitive values must be compatible, in order value by value, 
with the items in the d [...]
+
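+As an illustration (a minimal sketch using the Java generic API; the schema and values are arbitrary examples), serializing a datum and reading it back with the writer's schema looks like this:
+```java
+import java.io.ByteArrayOutputStream;
+
+import org.apache.avro.Schema;
+import org.apache.avro.generic.GenericData;
+import org.apache.avro.generic.GenericDatumReader;
+import org.apache.avro.generic.GenericDatumWriter;
+import org.apache.avro.generic.GenericRecord;
+import org.apache.avro.io.BinaryDecoder;
+import org.apache.avro.io.BinaryEncoder;
+import org.apache.avro.io.DecoderFactory;
+import org.apache.avro.io.EncoderFactory;
+
+public class BinaryRoundTrip {
+  public static void main(String[] args) throws Exception {
+    Schema schema = new Schema.Parser().parse(
+        "{\"type\": \"record\", \"name\": \"test\", \"fields\": ["
+        + "{\"name\": \"a\", \"type\": \"long\"}, {\"name\": \"b\", \"type\": \"string\"}]}");
+
+    GenericRecord datum = new GenericData.Record(schema);
+    datum.put("a", 27L);
+    datum.put("b", "foo");
+
+    // Serialize: only the field values are written; no names or type tags.
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(out, null);
+    new GenericDatumWriter<GenericRecord>(schema).write(datum, encoder);
+    encoder.flush();
+
+    // Deserialize: the writer's schema must be supplied to make sense of the bytes.
+    BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(out.toByteArray(), null);
+    GenericRecord copy = new GenericDatumReader<GenericRecord>(schema).read(null, decoder);
+    System.out.println(copy);  // {"a": 27, "b": "foo"}
+  }
+}
+```
+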
+### Encodings
+Avro specifies two serialization encodings: binary and JSON. Most applications 
will use the binary encoding, as it is smaller and faster. But, for debugging 
and web-based applications, the JSON encoding may sometimes be appropriate.
+
+### Binary Encoding {#binary-encoding}
+Binary encoding does not include field names, self-contained information about 
the types of individual bytes, nor field or record separators. Therefore 
readers are wholly reliant on the schema used when the data was encoded.
+
+#### Primitive Types
+Primitive types are encoded in binary as follows:
+
+* _null_ is written as zero bytes.
+* a _boolean_ is written as a single byte whose value is either 0 (false) or 1 
(true).
+* _int_ and _long_ values are written using [variable-length](https://lucene.apache.org/java/3_5_0/fileformats.html#VInt) [zig-zag](https://code.google.com/apis/protocolbuffers/docs/encoding.html#types) coding (a short sketch of this coding follows at the end of this subsection). Some examples:
+
+| *value* | *hex* |
+|---|---|
+| 0 | 00 |
+|-1 | 01 |
+| 1 | 02 |
+|-2 | 03 |
+| 2 | 04 |
+|...|...|
+|-64 | 7f |
+|64 | 80 01|
+|...|...|
+
+* a _float_ is written as 4 bytes. The float is converted into a 32-bit 
integer using a method equivalent to Java's 
[floatToIntBits](https://docs.oracle.com/javase/8/docs/api/java/lang/Float.html#floatToIntBits-float-)
 and then encoded in little-endian format.
+* a _double_ is written as 8 bytes. The double is converted into a 64-bit 
integer using a method equivalent to Java's 
[doubleToLongBits](https://docs.oracle.com/javase/8/docs/api/java/lang/Double.html#doubleToLongBits-double-)
 and then encoded in little-endian format.
+* _bytes_ are encoded as a long followed by that many bytes of data.
+* a _string_ is encoded as a long followed by that many bytes of UTF-8 encoded 
character data.
+For example, the three-character string "foo" would be encoded as the long 
value 3 (encoded as hex 06) followed by the UTF-8 encoding of 'f', 'o', and 'o' 
(the hex bytes 66 6f 6f):
+```
+06 66 6f 6f
+```
+
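+The following sketch (illustrative only, not taken from the Avro codebase) shows one way to express the variable-length zig-zag coding of a long described above:
+```java
+public class ZigZagDemo {
+  // Variable-length zig-zag encoding of a long, as described above.
+  static byte[] encodeLong(long n) {
+    long z = (n << 1) ^ (n >> 63);              // zig-zag: small magnitudes get small codes
+    byte[] buf = new byte[10];
+    int pos = 0;
+    while ((z & ~0x7FL) != 0) {
+      buf[pos++] = (byte) ((z & 0x7F) | 0x80);  // low 7 bits, continuation bit set
+      z >>>= 7;
+    }
+    buf[pos++] = (byte) z;                      // final byte, continuation bit clear
+    return java.util.Arrays.copyOf(buf, pos);
+  }
+
+  public static void main(String[] args) {
+    // Matches the table above: 0 -> 00, -1 -> 01, 1 -> 02, -64 -> 7f, 64 -> 80 01
+    for (long n : new long[] {0, -1, 1, -64, 64}) {
+      StringBuilder hex = new StringBuilder();
+      for (byte b : encodeLong(n)) {
+        hex.append(String.format("%02x ", b));
+      }
+      System.out.println(n + " -> " + hex.toString().trim());
+    }
+  }
+}
+```
+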
+### Complex Types
+Complex types are encoded in binary as follows:
+
+#### Records
+A record is encoded by encoding the values of its fields in the order that 
they are declared. In other words, a record is encoded as just the 
concatenation of the encodings of its fields. Field values are encoded per 
their schema.
+
+For example, the record schema
+```json
+{
+  "type": "record",
+  "name": "test",
+  "fields" : [
+    {"name": "a", "type": "long"},
+    {"name": "b", "type": "string"}
+  ]
+}
+```
+           
+An instance of this record whose a field has value 27 (encoded as hex 36) and 
whose b field has value "foo" (encoded as hex bytes 06 66 6f 6f), would be 
encoded simply as the concatenation of these, namely the hex byte sequence:
+```
+36 06 66 6f 6f
+```
+
+#### Enums
+An enum is encoded by an int, representing the zero-based position of the symbol in the schema.
+
+For example, consider the enum:
+```json
+{"type": "enum", "name": "Foo", "symbols": ["A", "B", "C", "D"] }
+```
+
+This would be encoded by an int between zero and three, with zero indicating 
"A", and 3 indicating "D".
+
+#### Arrays
+Arrays are encoded as a series of blocks. Each block consists of a long count 
value, followed by that many array items. A block with count zero indicates the 
end of the array. Each item is encoded per the array's item schema.
+
+If a block's count is negative, its absolute value is used, and the count is 
followed immediately by a long block size indicating the number of bytes in the 
block. This block size permits fast skipping through data, e.g., when 
projecting a record to a subset of its fields.
+
+For example, the array schema
+```json
+{"type": "array", "items": "long"}
+```
+an array containing the items 3 and 27 could be encoded as the long value 2 
(encoded as hex 04) followed by long values 3 and 27 (encoded as hex 06 36) 
terminated by zero:
+```
+04 06 36 00
+```
+
+The blocked representation permits one to read and write arrays larger than 
can be buffered in memory, since one can start writing items without knowing 
the full length of the array.
+
+#### Maps {#schema-maps}
+Maps are encoded as a series of _blocks_. Each block consists of a `long` 
_count_ value, followed by that many key/value pairs. A block with count zero 
indicates the end of the map. Each item is encoded per the map's value schema.
+
+If a block's count is negative, its absolute value is used, and the count is 
followed immediately by a `long` block size indicating the number of bytes in 
the block. This block size permits fast skipping through data, e.g., when 
projecting a record to a subset of its fields.
+
+The blocked representation permits one to read and write maps larger than can 
be buffered in memory, since one can start writing items without knowing the 
full length of the map.
+
+#### Unions
+A union is encoded by first writing an `int` value indicating the zero-based 
position within the union of the schema of its value. The value is then encoded 
per the indicated schema within the union.
+
+For example, the union schema `["null","string"]` would encode:
+
+* _null_ as zero (the index of "null" in the union):
+`00`
+* the string "a" as one (the index of "string" in the union, 1, encoded as hex 
02), followed by the serialized string:
+`02 02 61`
+
+NOTE: Currently for C/C++ implementations, the positions are practically an int, but theoretically a long. In reality, we don't expect unions with 215M members.
+
+#### Fixed
+Fixed instances are encoded using the number of bytes declared in the schema.
+
+### JSON Encoding
+Except for unions, the JSON encoding is the same as is used to encode [field 
default values]({{< ref "#schema-record" >}}).
+
+The value of a union is encoded in JSON as follows:
+
+* if its type is _null_, then it is encoded as a JSON _null_;
+* otherwise it is encoded as a JSON object with one name/value pair whose name 
is the type's name and whose value is the recursively encoded value. For Avro's 
named types (record, fixed or enum) the user-specified name is used, for other 
types the type name is used.
+
+For example, the union schema `["null","string","Foo"]`, where Foo is a record 
name, would encode:
+
+* _null_ as _null_;
+* the string "a" as `{"string": "a"}` and
+* a Foo instance as `{"Foo": {...}}`, where `{...}` indicates the JSON 
encoding of a Foo instance.
+
+Note that the original schema is still required to correctly process 
JSON-encoded data. For example, the JSON encoding does not distinguish between 
_int_ and _long_, _float_ and _double_, records and maps, enums and strings, 
etc.
+
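+As an illustration (a sketch using the Java generic API), writing a string value against a `["null","string"]` union with a JSON encoder wraps it in a single-field object:
+```java
+import java.io.ByteArrayOutputStream;
+
+import org.apache.avro.Schema;
+import org.apache.avro.generic.GenericDatumWriter;
+import org.apache.avro.io.Encoder;
+import org.apache.avro.io.EncoderFactory;
+
+public class JsonUnionDemo {
+  public static void main(String[] args) throws Exception {
+    Schema union = new Schema.Parser().parse("[\"null\", \"string\"]");
+
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    Encoder encoder = EncoderFactory.get().jsonEncoder(union, out);
+    GenericDatumWriter<Object> writer = new GenericDatumWriter<>(union);
+
+    writer.write("a", encoder);   // the string branch of the union
+    encoder.flush();
+    System.out.println(out.toString("UTF-8"));  // expected: {"string":"a"}
+  }
+}
+```
+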
+### Single-object encoding
+In some situations a single Avro serialized object is to be stored for a 
longer period of time. One very common example is storing Avro records for 
several weeks in an [Apache Kafka](https://kafka.apache.org/) topic.
+
+In the period after a schema change this persistence system will contain 
records that have been written with different schemas. So the need arises to 
know which schema was used to write a record to support schema evolution 
correctly. In most cases the schema itself is too large to include in the 
message, so this binary wrapper format supports the use case more effectively.
+
+#### Single object encoding specification
+Single Avro objects are encoded as follows:
+
+1. A two-byte marker, `C3 01`, to show that the message is Avro and uses this 
single-record format (version 1).
+1. The 8-byte little-endian CRC-64-AVRO [fingerprint]({{< ref 
"#schema-fingerprints" >}} "Schema fingerprints") of the object's schema.
+1. The Avro object encoded using [Avro's binary encoding]({{< ref 
"#binary-encoding" >}}).
+
+Implementations use the 2-byte marker to determine whether a payload is Avro. 
This check helps avoid expensive lookups that resolve the schema from a 
fingerprint, when the message is not an encoded Avro payload.
+
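+For illustration, the framing can be produced by hand with the Java `SchemaNormalization` helper (a sketch only; applications would more likely use a provided message encoder class such as `BinaryMessageEncoder`):
+```java
+import java.io.ByteArrayOutputStream;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import org.apache.avro.Schema;
+import org.apache.avro.SchemaNormalization;
+import org.apache.avro.generic.GenericDatumWriter;
+import org.apache.avro.generic.GenericRecord;
+import org.apache.avro.io.BinaryEncoder;
+import org.apache.avro.io.EncoderFactory;
+
+public class SingleObjectSketch {
+  static byte[] encode(Schema schema, GenericRecord datum) throws Exception {
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    out.write(0xC3);  // two-byte marker for the single-object format (version 1)
+    out.write(0x01);
+
+    // 8-byte little-endian CRC-64-AVRO fingerprint of the schema.
+    long fingerprint = SchemaNormalization.parsingFingerprint64(schema);
+    out.write(ByteBuffer.allocate(8).order(ByteOrder.LITTLE_ENDIAN).putLong(fingerprint).array());
+
+    // The datum itself, in Avro's standard binary encoding.
+    BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(out, null);
+    new GenericDatumWriter<GenericRecord>(schema).write(datum, encoder);
+    encoder.flush();
+    return out.toByteArray();
+  }
+}
+```
+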
+## Sort Order
+Avro defines a standard sort order for data. This permits data written by one 
system to be efficiently sorted by another system. This can be an important 
optimization, as sort order comparisons are sometimes the most frequent 
per-object operation. Note also that Avro binary-encoded data can be 
efficiently ordered without deserializing it to objects.
+
+Data items may only be compared if they have identical schemas. Pairwise 
comparisons are implemented recursively with a depth-first, left-to-right 
traversal of the schema. The first mismatch encountered determines the order of 
the items.
+
+Two items with the same schema are compared according to the following rules.
+
+* _null_ data is always equal.
+* _boolean_ data is ordered with false before true.
+* _int_, _long_, _float_ and _double_ data is ordered by ascending numeric 
value.
+* _bytes_ and fixed data are compared lexicographically by unsigned 8-bit 
values.
+* _string_ data is compared lexicographically by Unicode code point. Note that 
since UTF-8 is used as the binary encoding for strings, sorting of bytes and 
string binary data is identical.
+* _array_ data is compared lexicographically by element.
+* _enum_ data is ordered by the symbol's position in the enum schema. For 
example, an enum whose symbols are `["z", "a"]` would sort "z" values before 
"a" values.
+* _union_ data is first ordered by the branch within the union, and, within 
that, by the type of the branch. For example, an `["int", "string"]` union 
would order all int values before all string values, with the ints and strings 
themselves ordered as defined above.
+* _record_ data is ordered lexicographically by field. If a field specifies 
that its order is:
+    * "ascending", then the order of its values is unaltered.
+    * "descending", then the order of its values is reversed.
+    * "ignore", then its values are ignored when sorting.
+* _map_ data may not be compared. It is an error to attempt to compare data 
containing maps unless those maps are in an `"order":"ignore"` record field.
+
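+These rules are applied to the binary encoding itself; for example, the Java implementation can compare two encoded records without deserializing them. The sketch below uses an arbitrary example schema whose second field is marked `"order": "ignore"`:
+```java
+import java.io.ByteArrayOutputStream;
+
+import org.apache.avro.Schema;
+import org.apache.avro.generic.GenericData;
+import org.apache.avro.generic.GenericDatumWriter;
+import org.apache.avro.generic.GenericRecord;
+import org.apache.avro.io.BinaryData;
+import org.apache.avro.io.BinaryEncoder;
+import org.apache.avro.io.EncoderFactory;
+
+public class SortOrderDemo {
+  static byte[] encode(Schema schema, GenericRecord r) throws Exception {
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    BinaryEncoder enc = EncoderFactory.get().binaryEncoder(out, null);
+    new GenericDatumWriter<GenericRecord>(schema).write(r, enc);
+    enc.flush();
+    return out.toByteArray();
+  }
+
+  public static void main(String[] args) throws Exception {
+    Schema schema = new Schema.Parser().parse(
+        "{\"type\": \"record\", \"name\": \"KV\", \"fields\": ["
+        + "{\"name\": \"k\", \"type\": \"string\"},"
+        + "{\"name\": \"v\", \"type\": \"int\", \"order\": \"ignore\"}]}");
+
+    GenericRecord a = new GenericData.Record(schema);
+    a.put("k", "apple");
+    a.put("v", 5);
+    GenericRecord b = new GenericData.Record(schema);
+    b.put("k", "banana");
+    b.put("v", 1);
+
+    // Negative result: "apple" sorts before "banana"; the ignored "v" field plays no part.
+    System.out.println(BinaryData.compare(encode(schema, a), 0, encode(schema, b), 0, schema));
+  }
+}
+```
+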
+## Object Container Files
+Avro includes a simple object container file format. A file has a schema, and 
all objects stored in the file must be written according to that schema, using 
binary encoding. Objects are stored in blocks that may be compressed. 
Synchronization markers are used between blocks to permit efficient splitting of files for MapReduce processing.
+
+Files may include arbitrary user-specified metadata.
+
+A file consists of:
+
+* A file header, followed by
+* one or more file data blocks.
+
+A file header consists of:
+
+* Four bytes, ASCII 'O', 'b', 'j', followed by the byte 1 (the version number of the object container file format).
+* File metadata, including the schema.
+* The 16-byte, randomly-generated sync marker for this file.
+
+File metadata is written as if defined by the following [map]({{< ref 
"#schema-maps" >}}) schema:
+```json
+{"type": "map", "values": "bytes"}
+```
+All metadata properties that start with "avro." are reserved. The following 
file metadata properties are currently used:
+
+* **avro.schema** contains the schema of objects stored in the file, as JSON 
data (required).
+* **avro.codec** the name of the compression codec used to compress blocks, as 
a string. Implementations are required to support the following codecs: "null" 
and "deflate". If codec is absent, it is assumed to be "null". The codecs are 
described with more detail below.
+
+A file header is thus described by the following schema:
+```json
+{"type": "record", "name": "org.apache.avro.file.Header",
+ "fields" : [
+   {"name": "magic", "type": {"type": "fixed", "name": "Magic", "size": 4}},
+   {"name": "meta", "type": {"type": "map", "values": "bytes"}},
+   {"name": "sync", "type": {"type": "fixed", "name": "Sync", "size": 16}}
+  ]
+}
+```
+      
+A file data block consists of:
+
+* A long indicating the count of objects in this block.
+* A long indicating the size in bytes of the serialized objects in the current 
block, after any codec is applied
+* The serialized objects. If a codec is specified, this is compressed by that 
codec.
+* The file's 16-byte sync marker.
+
+A file data block is thus described by the following schema:
+```json
+{"type": "record", "name": "org.apache.avro.file.DataBlock",
+ "fields" : [
+   {"name": "count", "type": "long"},
+   {"name": "data", "type": "bytes"},
+   {"name": "sync", "type": {"type": "fixed", "name": "Sync", "size": 16}}
+  ]
+}
+```
+
+Each block's binary data can be efficiently extracted or skipped without deserializing the contents. The combination of block size, object counts, and sync markers enables detection of corrupt blocks and helps ensure data integrity.
+
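+As an illustration (a minimal sketch using the Java API; the file name, schema and codec choice are arbitrary), writing and then reading such a container file looks like this:
+```java
+import java.io.File;
+
+import org.apache.avro.Schema;
+import org.apache.avro.file.CodecFactory;
+import org.apache.avro.file.DataFileReader;
+import org.apache.avro.file.DataFileWriter;
+import org.apache.avro.generic.GenericData;
+import org.apache.avro.generic.GenericDatumReader;
+import org.apache.avro.generic.GenericDatumWriter;
+import org.apache.avro.generic.GenericRecord;
+
+public class ContainerFileDemo {
+  public static void main(String[] args) throws Exception {
+    Schema schema = new Schema.Parser().parse(
+        "{\"type\": \"record\", \"name\": \"test\", \"fields\": ["
+        + "{\"name\": \"a\", \"type\": \"long\"}, {\"name\": \"b\", \"type\": \"string\"}]}");
+    File file = new File("test.avro");
+
+    // Write: the schema is stored in the file header and blocks are compressed.
+    try (DataFileWriter<GenericRecord> writer =
+             new DataFileWriter<>(new GenericDatumWriter<GenericRecord>(schema))) {
+      writer.setCodec(CodecFactory.deflateCodec(6));  // "deflate" is a required codec
+      writer.create(schema, file);
+      GenericRecord r = new GenericData.Record(schema);
+      r.put("a", 27L);
+      r.put("b", "foo");
+      writer.append(r);
+    }
+
+    // Read: the writer's schema comes from the file itself.
+    try (DataFileReader<GenericRecord> reader =
+             new DataFileReader<>(file, new GenericDatumReader<GenericRecord>())) {
+      for (GenericRecord r : reader) {
+        System.out.println(r);
+      }
+    }
+  }
+}
+```
+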
+### Required Codecs
+
+_null_
+
+The "null" codec simply passes through data uncompressed.
+
+_deflate_
+
+The "deflate" codec writes the data block using the deflate algorithm as 
specified in [RFC 1951](https://www.isi.edu/in-notes/rfc1951.txt), and 
typically implemented using the zlib library. Note that this format (unlike the 
"zlib format" in RFC 1950) does not have a checksum.
+
+### Optional Codecs
+_bzip2_
+
+The "bzip2" codec uses the [bzip2](https://sourceware.org/bzip2/) compression 
library.
+
+_snappy_
+
+The "snappy" codec uses Google's [Snappy](https://code.google.com/p/snappy/) 
compression library. Each compressed block is followed by the 4-byte, 
big-endian CRC32 checksum of the uncompressed data in the block.
+
+_xz_
+
+The "xz" codec uses the [XZ](https://tukaani.org/xz/) compression library.
+
+_zstandard_
+
+The "zstandard" codec uses Facebook's 
[Zstandard](https://facebook.github.io/zstd/) compression library.
+
+### Protocol Declaration
+Avro protocols describe RPC interfaces. Like schemas, they are defined with 
JSON text.
+
+A protocol is a JSON object with the following attributes:
+
+* _protocol_, a string, the name of the protocol (required);
+* _namespace_, a string that qualifies the name (optional);
+* _doc_, an optional string describing this protocol;
+* _types_, an optional list of definitions of named types (records, enums, 
fixed and errors). An error definition is just like a record definition except 
it uses "error" instead of "record". Note that forward references to named 
types are not permitted.
+* _messages_, an optional JSON object whose keys are message names and whose 
values are objects whose attributes are described below. No two messages may 
have the same name.
+
+The name and namespace qualification rules defined for schema objects apply to 
protocols as well.
+
+### Messages
+A message has attributes:
+
+* a _doc_, an optional description of the message,
+* a _request_, a list of named, typed parameter schemas (this has the same 
form as the fields of a record declaration);
+* a _response_ schema;
+* an optional union of declared error schemas. The effective union has 
"string" prepended to the declared union, to permit transmission of undeclared 
"system" errors. For example, if the declared error union is `["AccessError"]`, 
then the effective union is `["string", "AccessError"]`. When no errors are 
declared, the effective error union is `["string"]`. Errors are serialized 
using the effective union; however, a protocol's JSON declaration contains only 
the declared union.
+* an optional one-way boolean parameter.
+
+A request parameter list is processed equivalently to an anonymous record. 
Since record field lists may vary between reader and writer, request parameters 
may also differ between the caller and responder, and such differences are 
resolved in the same manner as record field differences.
+
+The one-way parameter may only be true when the response type is `"null"` and 
no errors are listed.
+
+### Sample Protocol
+For example, one may define a simple HelloWorld protocol with:
+```json
+{
+  "namespace": "com.acme",
+  "protocol": "HelloWorld",
+  "doc": "Protocol Greetings",
+
+  "types": [
+    {"name": "Greeting", "type": "record", "fields": [
+      {"name": "message", "type": "string"}]},
+    {"name": "Curse", "type": "error", "fields": [
+      {"name": "message", "type": "string"}]}
+  ],
+
+  "messages": {
+    "hello": {
+      "doc": "Say hello.",
+      "request": [{"name": "greeting", "type": "Greeting" }],
+      "response": "Greeting",
+      "errors": ["Curse"]
+    }
+  }
+}
+```
+        
+## Protocol Wire Format
+
+### Message Transport
+Messages may be transmitted via different transport mechanisms.
+
+To the transport, a _message_ is an opaque byte sequence.
+
+A transport is a system that supports:
+
+* **transmission of request messages**
+* **receipt of corresponding response messages**
+
+Servers may send a response message back to the client corresponding to a request message. The mechanism of correspondence is transport-specific. For example, in HTTP it is implicit, since HTTP directly supports requests and responses. But a transport that multiplexes many client threads over a single socket would need to tag messages with unique identifiers.
+
+Transports may be either stateless or stateful. In a stateless transport, 
messaging assumes no established connection state, while stateful transports 
establish connections that may be used for multiple messages. This distinction 
is discussed further in the [handshake](#handshake) section below.
+
+#### HTTP as Transport
+When [HTTP](https://www.w3.org/Protocols/rfc2616/rfc2616.html) is used as a 
transport, each Avro message exchange is an HTTP request/response pair. All 
messages of an Avro protocol should share a single URL at an HTTP server. Other 
protocols may also use that URL. Both normal and error Avro response messages 
should use the 200 (OK) response code. The chunked encoding may be used for 
requests and responses, but, regardless the Avro request and response are the 
entire content of an HTTP re [...]
+
+HTTP is used by Avro as a stateless transport.
+
+### Message Framing
+Avro messages are _framed_ as a list of buffers.
+
+Framing is a layer between messages and the transport. It exists to optimize 
certain operations.
+
+The format of framed message data is:
+
+* a series of buffers, where each buffer consists of:
+    * a four-byte, big-endian _buffer length_, followed by
+    * that many bytes of _buffer_ data.
+* a message is always terminated by a zero-length buffer.
+
+Framing is transparent to request and response message formats (described 
below). Any message may be presented as a single or multiple buffers.
+
+Framing can permit readers to more efficiently get different buffers from 
different sources and for writers to more efficiently store different buffers 
to different destinations. In particular, it can reduce the number of times 
large binary objects are copied. For example, if an RPC parameter consists of a 
megabyte of file data, that data can be copied directly to a socket from a file 
descriptor, and, on the other end, it could be written directly to a file 
descriptor, never entering use [...]
+
+A simple, recommended, framing policy is for writers to create a new segment 
whenever a single binary object is written that is larger than a normal output 
buffer. Small objects are then appended in buffers, while larger objects are 
written as their own buffers. When a reader then tries to read a large object 
the runtime can hand it an entire buffer directly, without having to copy it.
+
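+An illustrative sketch of this framing (not taken from the Avro codebase), for the simple case of a message that already sits in a single byte array:
+```java
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+
+public class FramingSketch {
+  // Frames a message held in one buffer: a 4-byte big-endian length, the buffer
+  // data, then a zero-length buffer terminating the message.
+  static byte[] frame(byte[] message) throws IOException {
+    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+    DataOutputStream out = new DataOutputStream(bytes);
+    out.writeInt(message.length);  // buffer length
+    out.write(message);            // buffer data
+    out.writeInt(0);               // zero-length buffer ends the message
+    return bytes.toByteArray();
+  }
+}
+```
+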
+### Handshake
+The purpose of the handshake is to ensure that the client and the server have 
each other's protocol definition, so that the client can correctly deserialize 
responses, and the server can correctly deserialize requests. Both clients and 
servers should maintain a cache of recently seen protocols, so that, in most 
cases, a handshake will be completed without extra round-trip network exchanges 
or the transmission of full protocol text.
+
+RPC requests and responses may not be processed until a handshake has been 
completed. With a stateless transport, all requests and responses are prefixed 
by handshakes. With a stateful transport, handshakes are only attached to 
requests and responses until a successful handshake response has been returned 
over a connection. After this, request and response payloads are sent without 
handshakes for the lifetime of that connection.
+
+The handshake process uses the following record schemas:
+```json
+{
+  "type": "record",
+  "name": "HandshakeRequest", "namespace":"org.apache.avro.ipc",
+  "fields": [
+    {"name": "clientHash",
+     "type": {"type": "fixed", "name": "MD5", "size": 16}},
+    {"name": "clientProtocol", "type": ["null", "string"]},
+    {"name": "serverHash", "type": "MD5"},
+    {"name": "meta", "type": ["null", {"type": "map", "values": "bytes"}]}
+  ]
+}
+{
+  "type": "record",
+  "name": "HandshakeResponse", "namespace": "org.apache.avro.ipc",
+  "fields": [
+    {"name": "match",
+     "type": {"type": "enum", "name": "HandshakeMatch",
+              "symbols": ["BOTH", "CLIENT", "NONE"]}},
+    {"name": "serverProtocol",
+     "type": ["null", "string"]},
+    {"name": "serverHash",
+     "type": ["null", {"type": "fixed", "name": "MD5", "size": 16}]},
+    {"name": "meta",
+     "type": ["null", {"type": "map", "values": "bytes"}]}
+  ]
+}
+```
+        
+* A client first prefixes each request with a `HandshakeRequest` containing 
just the hash of its protocol and of the server's protocol (`clientHash!=null, 
clientProtocol=null, serverHash!=null`), where the hashes are 128-bit MD5 
hashes of the JSON protocol text. If a client has never connected to a given 
server, it sends its hash as a guess of the server's hash, otherwise it sends 
the hash that it previously obtained from this server.
+The server responds with a HandshakeResponse containing one of:
+  * `match=BOTH, serverProtocol=null, serverHash=null` if the client sent the 
valid hash of the server's protocol and the server knows what protocol 
corresponds to the client's hash. In this case, the request is complete and the 
response data immediately follows the HandshakeResponse.
+  * `match=CLIENT, serverProtocol!=null, serverHash!=null` if the server has 
previously seen the client's protocol, but the client sent an incorrect hash of 
the server's protocol. The request is complete and the response data 
immediately follows the HandshakeResponse. The client must use the returned 
protocol to process the response and should also cache that protocol and its 
hash for future interactions with this server.
+  * `match=NONE` if the server has not previously seen the client's protocol. 
The serverHash and serverProtocol may also be non-null if the server's protocol 
hash was incorrect.
+In this case the client must then re-submit its request with its protocol text 
(`clientHash!=null, clientProtocol!=null, serverHash!=null`) and the server 
should respond with a successful match (match=BOTH, serverProtocol=null, 
serverHash=null) as above.
+
+The meta field is reserved for future handshake enhancements.
+
+### Call Format
+A _call_ consists of a request message paired with its resulting response or 
error message. Requests and responses contain extensible metadata, and both 
kinds of messages are framed as described above.
+
+The format of a call request is:
+
+* _request metadata_, a map with values of type bytes
+* the _message name_, an Avro string, followed by
+* the _message parameters_. Parameters are serialized according to the message's request declaration.
+
+When the empty string is used as a message name, a server should ignore the parameters and return an empty response. A client may use this to ping a server or to perform a handshake without sending a protocol message.
+
+When a message is declared one-way and a stateful connection has been 
established by a successful handshake response, no response data is sent. 
Otherwise the format of the call response is:
+
+* _response metadata_, a map with values of type bytes
+* a one-byte error _flag_ boolean, followed by either:
+  * if the error flag is false, the message _response_, serialized per the 
message's response schema.
+  * if the error flag is true, the _error_, serialized per the message's 
effective error union schema.
+
+### Schema Resolution {#schema-resolution}
+A reader of Avro data, whether from an RPC or a file, can always parse that 
data because the original schema must be provided along with the data. However, 
the reader may be programmed to read data into a different schema. For example, 
if the data was written with a different version of the software than it is 
read, then fields may have been added or removed from records. This section 
specifies how such schema differences should be resolved.
+
+We refer to the schema used to write the data as the writer's schema, and the schema that the application expects as the reader's schema. Differences between these should be resolved as follows:
+
+* It is an error if the two schemas do not _match_.
+To match, one of the following must hold:
+  * both schemas are arrays whose item types match
+  * both schemas are maps whose value types match
+  * both schemas are enums whose (unqualified) names match
+  * both schemas are fixed whose sizes and (unqualified) names match
+  * both schemas are records with the same (unqualified) name
+  * either schema is a union
+  * both schemas have the same primitive type
+  * the writer's schema may be promoted to the reader's as follows:
+    * int is promotable to long, float, or double
+    * long is promotable to float or double
+    * float is promotable to double
+    * string is promotable to bytes
+    * bytes is promotable to string
+* **if both are records**:
+  * the ordering of fields may be different: fields are matched by name.
+  * schemas for fields with the same name in both records are resolved 
recursively.
+  * if the writer's record contains a field with a name not present in the 
reader's record, the writer's value for that field is ignored.
+  * if the reader's record schema has a field that contains a default value, 
and writer's schema does not have a field with the same name, then the reader 
should use the default value from its field.
+  * if the reader's record schema has a field with no default value, and 
writer's schema does not have a field with the same name, an error is signalled.
+* **if both are enums**:
+if the writer's symbol is not present in the reader's enum and the reader has 
a default value, then that value is used, otherwise an error is signalled.
+
+* **if both are arrays**:
+This resolution algorithm is applied recursively to the reader's and writer's 
array item schemas.
+
+* **if both are maps**:
+This resolution algorithm is applied recursively to the reader's and writer's 
value schemas.
+
+* **if both are unions**:
+The first schema in the reader's union that matches the selected writer's 
union schema is recursively resolved against it. If none match, an error is 
signalled.
+
+* **if reader's is a union, but writer's is not**
+The first schema in the reader's union that matches the writer's schema is 
recursively resolved against it. If none match, an error is signalled.
+
+* **if writer's is a union, but reader's is not**
+If the reader's schema matches the selected writer's schema, it is recursively 
resolved against it. If they do not match, an error is signalled.
+
+A schema's _doc_ fields are ignored for the purposes of schema resolution. 
Hence, the _doc_ portion of a schema may be dropped at serialization.
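+
+To make the resolution rules above concrete, the following is a minimal, 
non-normative Java sketch (the record and field names are illustrative) in which 
a reader whose schema adds a defaulted field reads data written with an older 
writer's schema:
+```java
+import java.io.ByteArrayOutputStream;
+
+import org.apache.avro.Schema;
+import org.apache.avro.generic.GenericData;
+import org.apache.avro.generic.GenericDatumReader;
+import org.apache.avro.generic.GenericDatumWriter;
+import org.apache.avro.generic.GenericRecord;
+import org.apache.avro.io.BinaryDecoder;
+import org.apache.avro.io.BinaryEncoder;
+import org.apache.avro.io.DecoderFactory;
+import org.apache.avro.io.EncoderFactory;
+
+public class ResolutionExample {
+  public static void main(String[] args) throws Exception {
+    // Writer's schema: a record with a single string field.
+    Schema writer = new Schema.Parser().parse(
+        "{\"type\":\"record\",\"name\":\"User\",\"fields\":["
+        + "{\"name\":\"name\",\"type\":\"string\"}]}");
+    // Reader's schema: the same record plus a field with a default value.
+    Schema reader = new Schema.Parser().parse(
+        "{\"type\":\"record\",\"name\":\"User\",\"fields\":["
+        + "{\"name\":\"name\",\"type\":\"string\"},"
+        + "{\"name\":\"age\",\"type\":\"int\",\"default\":-1}]}");
+
+    // Serialize a datum using the writer's schema.
+    GenericRecord datum = new GenericData.Record(writer);
+    datum.put("name", "Alice");
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(out, null);
+    new GenericDatumWriter<GenericRecord>(writer).write(datum, encoder);
+    encoder.flush();
+
+    // Deserialize with both schemas; the missing "age" field takes its default.
+    BinaryDecoder decoder =
+        DecoderFactory.get().binaryDecoder(out.toByteArray(), null);
+    GenericRecord resolved =
+        new GenericDatumReader<GenericRecord>(writer, reader).read(null, decoder);
+    System.out.println(resolved); // {"name": "Alice", "age": -1}
+  }
+}
+```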
+
+### Parsing Canonical Form for Schemas {#parsing-canonical-form-for-schemas}
+One of the defining characteristics of Avro is that a reader must use the 
schema used by the writer of the data in order to know how to read the data. 
This assumption results in a data format that's compact and also amenable to 
many forms of schema evolution. However, the specification so far has not 
defined what it means for the reader to have the "same" schema as the writer. 
Does the schema need to be textually identical? Well, clearly adding or 
removing some whitespace to a JSON expre [...]
+
+Parsing Canonical Form is a transformation of a writer's schema that lets us 
define what it means for two schemas to be "the same" for the purpose of 
reading data written against the schema. It is called Parsing Canonical Form 
because the transformations strip away parts of the schema, like "doc" 
attributes, that are irrelevant to readers trying to parse incoming data. It is 
called Canonical Form because the transformations normalize the JSON text (such 
as the order of attributes) in a  [...]
+
+The next subsection specifies the transformations that define Parsing 
Canonical Form. But with a well-defined canonical form, it can be convenient to 
go one step further, transforming these canonical forms into simple integers 
("fingerprints") that can be used to uniquely identify schemas. The subsection 
after next recommends some standard practices for generating such fingerprints.
+
+#### Transforming into Parsing Canonical Form
+Assuming an input schema (in JSON form) that's already UTF-8 text for a 
_valid_ Avro schema (including all quotes as required by JSON), the following 
transformations will produce its Parsing Canonical Form:
+
+* [PRIMITIVES] Convert primitive schemas to their simple form (e.g., int 
instead of `{"type":"int"}`).
+* [FULLNAMES] Replace short names with fullnames, using applicable namespaces 
to do so. Then eliminate namespace attributes, which are now redundant.
+* [STRIP] Keep only attributes that are relevant to parsing data, which are: 
_type_, _name_, _fields_, _symbols_, _items_, _values_, _size_. Strip all 
others (e.g., _doc_ and _aliases_).
+* [ORDER] Order the appearance of fields of JSON objects as follows: _name_, 
_type_, _fields_, _symbols_, _items_, _values_, _size_. For example, if an 
object has _type_, _name_, and _size_ fields, then the _name_ field should 
appear first, followed by the _type_ and then the _size_ fields.
+* [STRINGS] For all JSON string literals in the schema text, replace any 
escaped characters (e.g., \uXXXX escapes) with their UTF-8 equivalents.
+* [INTEGERS] Eliminate quotes around and any leading zeros in front of JSON 
integer literals (which appear in the _size_ attributes of _fixed_ schemas).
+* [WHITESPACE] Eliminate all whitespace in JSON outside of string literals.
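+
+In the Java implementation these transformations are exposed through 
`org.apache.avro.SchemaNormalization`; the following non-normative sketch shows 
them applied to a small schema:
+```java
+import org.apache.avro.Schema;
+import org.apache.avro.SchemaNormalization;
+
+public class CanonicalFormExample {
+  public static void main(String[] args) {
+    Schema schema = new Schema.Parser().parse(
+        "{\"type\": \"fixed\", \"name\": \"md5\", \"doc\": \"ignored\", \"size\": 16}");
+    // [STRIP] drops "doc", [ORDER] reorders the attributes and [WHITESPACE]
+    // removes the spaces, yielding: {"name":"md5","type":"fixed","size":16}
+    System.out.println(SchemaNormalization.toParsingForm(schema));
+  }
+}
+```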
+
+#### Schema Fingerprints {#schema-fingerprints}
+"[A] fingerprinting algorithm is a procedure that maps an arbitrarily large 
data item (such as a computer file) to a much shorter bit string, its 
fingerprint, that uniquely identifies the original data for all practical 
purposes" (quoted from 
[Wikipedia](https://en.wikipedia.org/wiki/Fingerprint_(computing))). In the 
Avro context, fingerprints of Parsing Canonical Form can be useful in a number 
of applications; for example, to cache encoder and decoder objects, to tag data 
items with a s [...]
+
+In designing fingerprinting algorithms, there is a fundamental trade-off 
between the length of the fingerprint and the probability of collisions. To 
help application designers find appropriate points within this trade-off space, 
while encouraging interoperability and ease of implementation, we recommend 
using one of the following three algorithms when fingerprinting Avro schemas:
+
+* When applications can tolerate longer fingerprints, we recommend using the 
[SHA-256 digest algorithm](https://en.wikipedia.org/wiki/SHA-2) to generate 
256-bit fingerprints of Parsing Canonical Forms. Most languages today have 
SHA-256 implementations in their libraries.
+* At the opposite extreme, the smallest fingerprint we recommend is a 64-bit 
[Rabin fingerprint](https://en.wikipedia.org/wiki/Rabin_fingerprint). Below, we 
provide pseudo-code for this algorithm that can be easily translated into any 
programming language. 64-bit fingerprints should guarantee uniqueness for 
schema caches of up to a million entries (for such a cache, the chance of a 
collision is 3E-8). We don't recommend shorter fingerprints, as the chances of 
collisions is too great (for [...]
+* Between these two extremes, we recommend using the [MD5 message 
digest](https://en.wikipedia.org/wiki/MD5) to generate 128-bit fingerprints. 
These make sense only where very large numbers of schemas are being manipulated 
(tens of millions); otherwise, 64-bit fingerprints should be sufficient. As 
with SHA-256, MD5 implementations are found in most libraries today.
+
+These fingerprints are not meant to provide any security guarantees, even the 
longer SHA-256-based ones. Most Avro applications should be surrounded by 
security measures that prevent attackers from writing random data and otherwise 
interfering with the consumers of schemas. We recommend that these surrounding 
mechanisms be used to prevent collision and pre-image attacks (i.e., "forgery") 
on schema fingerprints, rather than relying on the security properties of the 
fingerprints themselves.
+
+Rabin fingerprints are [cyclic redundancy 
checks](https://en.wikipedia.org/wiki/Cyclic_redundancy_check) computed using 
irreducible polynomials. In the style of the Appendix of [RFC 
1952](https://www.ietf.org/rfc/rfc1952.txt) (pg 10), which defines the CRC-32 
algorithm, here's our definition of the 64-bit AVRO fingerprinting algorithm:
+```java
+long fingerprint64(byte[] buf) {
+  if (FP_TABLE == null) initFPTable();
+  long fp = EMPTY;
+  for (int i = 0; i < buf.length; i++)
+    fp = (fp >>> 8) ^ FP_TABLE[(int)(fp ^ buf[i]) & 0xff];
+  return fp;
+}
+
+static long EMPTY = 0xc15d213aa4d7a795L;
+static long[] FP_TABLE = null;
+
+void initFPTable() {
+  FP_TABLE = new long[256];
+  for (int i = 0; i < 256; i++) {
+    long fp = i;
+    for (int j = 0; j < 8; j++)
+      fp = (fp >>> 1) ^ (EMPTY & -(fp & 1L));
+    FP_TABLE[i] = fp;
+  }
+}
+```
+
+Readers interested in the mathematics behind this algorithm may want to read 
[Chapter 14 of the Second Edition of Hacker's 
Delight](https://books.google.com/books?id=XD9iAwAAQBAJ&pg=PA319). (Unlike 
RFC-1952 and the book chapter, we prepend a single one bit to messages. We do 
this because CRCs ignore leading zero bits, which can be problematic. Our code 
prepends a one-bit by initializing fingerprints using EMPTY, rather than 
initializing using zero as in RFC-1952 and the book chapter.)
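+
+As a non-normative illustration, the Java library's `SchemaNormalization` class 
computes these fingerprints over the Parsing Canonical Form directly; the schema 
below is arbitrary:
+```java
+import java.nio.charset.StandardCharsets;
+import java.security.MessageDigest;
+
+import org.apache.avro.Schema;
+import org.apache.avro.SchemaNormalization;
+
+public class FingerprintExample {
+  public static void main(String[] args) throws Exception {
+    Schema schema = new Schema.Parser().parse("{\"type\": \"string\"}");
+    // Canonical form of a primitive is its quoted simple form, here "string".
+    String canonical = SchemaNormalization.toParsingForm(schema);
+
+    // 64-bit Rabin fingerprint of the Parsing Canonical Form.
+    long rabin = SchemaNormalization.parsingFingerprint64(schema);
+
+    // SHA-256 fingerprint: the digest of the canonical form's UTF-8 bytes.
+    byte[] sha256 = MessageDigest.getInstance("SHA-256")
+        .digest(canonical.getBytes(StandardCharsets.UTF_8));
+
+    System.out.printf("%016x (%d-byte SHA-256 digest)%n", rabin, sha256.length);
+  }
+}
+```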
+
+## Logical Types
+A logical type is an Avro primitive or complex type with extra attributes to 
represent a derived type. The attribute `logicalType` must always be present 
for a logical type, and is a string with the name of one of the logical types 
listed later in this section. Other attributes may be defined for particular 
logical types.
+
+A logical type is always serialized using its underlying Avro type so that 
values are encoded in exactly the same way as the equivalent Avro type that 
does not have a `logicalType` attribute. Language implementations may choose to 
represent logical types with an appropriate native type, although this is not 
required.
+
+Language implementations must ignore unknown logical types when reading, and 
should use the underlying Avro type. If a logical type is invalid, for example 
a decimal with scale greater than its precision, then implementations should 
ignore the logical type and use the underlying Avro type.
+
+### Decimal
+The `decimal` logical type represents an arbitrary-precision signed decimal 
number of the form _unscaled × 10<sup>-scale</sup>_.
+
+A `decimal` logical type annotates Avro _bytes_ or _fixed_ types. The byte 
array must contain the two's-complement representation of the unscaled integer 
value in big-endian byte order. The scale is fixed, and is specified using an 
attribute.
+
+The following attributes are supported:
+
+* _scale_, a JSON integer representing the scale (optional). If not specified, 
the scale is 0.
+* _precision_, a JSON integer representing the (maximum) precision of decimals 
stored in this type (required).
+For example, the following schema represents decimal numbers with a maximum 
precision of 4 and a scale of 2:
+```json
+{
+  "type": "bytes",
+  "logicalType": "decimal",
+  "precision": 4,
+  "scale": 2
+}
+```
+Precision must be a positive integer greater than zero. If the underlying type 
is a _fixed_, then the precision is limited by its size. An array of length n 
can store at most _floor(log<sub>10</sub>(2<sup>8 × n - 1</sup> - 1))_ base-10 
digits of precision.
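+For example, a _fixed_ of size 16 can hold at most 
_floor(log<sub>10</sub>(2<sup>127</sup> - 1)) = 38_ digits of precision.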
+
+Scale must be zero or a positive integer less than or equal to the precision.
+
+For the purposes of schema resolution, two schemas that are `decimal` logical 
types _match_ if their scales and precisions match.
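+
+As a non-normative sketch, the Java library's `Conversions.DecimalConversion` 
can encode and decode a `BigDecimal` against such a schema (the value used here 
is arbitrary):
+```java
+import java.math.BigDecimal;
+import java.nio.ByteBuffer;
+
+import org.apache.avro.Conversions;
+import org.apache.avro.LogicalTypes;
+import org.apache.avro.Schema;
+
+public class DecimalExample {
+  public static void main(String[] args) {
+    // A bytes schema annotated with decimal(precision = 4, scale = 2).
+    Schema schema = LogicalTypes.decimal(4, 2)
+        .addToSchema(Schema.create(Schema.Type.BYTES));
+
+    Conversions.DecimalConversion conversion = new Conversions.DecimalConversion();
+    // 12.34 has unscaled value 1234; the bytes hold its big-endian
+    // two's-complement representation.
+    ByteBuffer encoded =
+        conversion.toBytes(new BigDecimal("12.34"), schema, schema.getLogicalType());
+    BigDecimal decoded =
+        conversion.fromBytes(encoded, schema, schema.getLogicalType());
+    System.out.println(decoded); // 12.34
+  }
+}
+```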
+
+### UUID
+The `uuid` logical type represents a randomly generated universally unique 
identifier (UUID).
+
+A `uuid` logical type annotates an Avro `string`. The string must conform to 
[RFC-4122](https://www.ietf.org/rfc/rfc4122.txt).
+
+### Date
+The `date` logical type represents a date within the calendar, with no 
reference to a particular time zone or time of day.
+
+A `date` logical type annotates an Avro `int`, where the int stores the number 
of days from the unix epoch, 1 January 1970 (ISO calendar).
+
+The following schema represents a date:
+```json
+{
+  "type": "int",
+  "logicalType": "date"
+}
+```
+
+### Time (millisecond precision)
+The `time-millis` logical type represents a time of day, with no reference to 
a particular calendar, time zone or date, with a precision of one millisecond.
+
+A `time-millis` logical type annotates an Avro `int`, where the int stores the 
number of milliseconds after midnight, 00:00:00.000.
+
+### Time (microsecond precision)
+The `time-micros` logical type represents a time of day, with no reference to 
a particular calendar, time zone or date, with a precision of one microsecond.
+
+A `time-micros` logical type annotates an Avro `long`, where the long stores 
the number of microseconds after midnight, 00:00:00.000000.
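+
+As a non-normative illustration, the underlying values for the date and time 
types above can be computed with `java.time` (the concrete date and time are 
arbitrary):
+```java
+import java.time.LocalDate;
+import java.time.LocalTime;
+
+public class DateTimeValues {
+  public static void main(String[] args) {
+    // date: days since the unix epoch.
+    int date = (int) LocalDate.of(1970, 2, 1).toEpochDay();    // 31
+
+    // time-millis and time-micros: time of day counted from midnight.
+    LocalTime time = LocalTime.of(12, 30, 0);
+    int timeMillis = (int) (time.toNanoOfDay() / 1_000_000L);  // 45000000
+    long timeMicros = time.toNanoOfDay() / 1_000L;             // 45000000000
+
+    System.out.println(date + " " + timeMillis + " " + timeMicros);
+  }
+}
+```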
+
+### Timestamp (millisecond precision)
+The `timestamp-millis` logical type represents an instant on the global 
timeline, independent of a particular time zone or calendar, with a precision 
of one millisecond. Please note that time zone information gets lost in this 
process. Upon reading a value back, we can only reconstruct the instant, but 
not the original representation. In practice, such timestamps are typically 
displayed to users in their local time zones, therefore they may be displayed 
differently depending on the execu [...]
+
+A `timestamp-millis` logical type annotates an Avro `long`, where the long 
stores the number of milliseconds from the unix epoch, 1 January 1970 
00:00:00.000 UTC.
+
+### Timestamp (microsecond precision)
+The `timestamp-micros` logical type represents an instant on the global 
timeline, independent of a particular time zone or calendar, with a precision 
of one microsecond. Please note that time zone information gets lost in this 
process. Upon reading a value back, we can only reconstruct the instant, but 
not the original representation. In practice, such timestamps are typically 
displayed to users in their local time zones, therefore they may be displayed 
differently depending on the execu [...]
+
+A `timestamp-micros` logical type annotates an Avro `long`, where the long 
stores the number of microseconds from the unix epoch, 1 January 1970 
00:00:00.000000 UTC.
+
+### Local timestamp (millisecond precision)
+The `local-timestamp-millis` logical type represents a timestamp in a local 
timezone, regardless of what specific time zone is considered local, with a 
precision of one millisecond.
+
+A `local-timestamp-millis` logical type annotates an Avro `long`, where the 
long stores the number of milliseconds from 1 January 1970 00:00:00.000.
+
+### Local timestamp (microsecond precision)
+The `local-timestamp-micros` logical type represents a timestamp in a local 
timezone, regardless of what specific time zone is considered local, with a 
precision of one microsecond.
+
+A `local-timestamp-micros` logical type annotates an Avro `long`, where the 
long stores the number of microseconds from 1 January 1970 00:00:00.000000.
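+
+Similarly, a non-normative sketch of deriving the underlying long values for 
the timestamp types with `java.time` (the instant used is arbitrary):
+```java
+import java.time.Instant;
+import java.time.LocalDateTime;
+import java.time.ZoneOffset;
+import java.time.temporal.ChronoUnit;
+
+public class TimestampValues {
+  public static void main(String[] args) {
+    Instant instant = Instant.parse("2024-09-22T10:15:30Z");
+
+    // timestamp-millis and timestamp-micros: elapsed time since the unix epoch.
+    long tsMillis = instant.toEpochMilli();
+    long tsMicros = ChronoUnit.MICROS.between(Instant.EPOCH, instant);
+
+    // local-timestamp-millis: a wall-clock reading with no zone attached; here
+    // the stored value is obtained by treating the local reading as if it were UTC.
+    LocalDateTime local = LocalDateTime.of(2024, 9, 22, 10, 15, 30);
+    long localMillis = local.toInstant(ZoneOffset.UTC).toEpochMilli();
+
+    System.out.println(tsMillis + " " + tsMicros + " " + localMillis);
+  }
+}
+```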
+
+### Duration
+The `duration` logical type represents an amount of time defined by a number 
of months, days and milliseconds. This is not equivalent to a number of 
milliseconds, because, depending on the moment in time from which the duration 
is measured, the number of days in the month and number of milliseconds in a 
day may differ. Other standard periods such as years, quarters, hours and 
minutes can be expressed through these basic periods.
+
+A `duration` logical type annotates an Avro `fixed` type of size 12, which 
stores three little-endian unsigned integers that represent durations at 
different granularities of time. The first stores a number in months, the 
second stores a number in days, and the third stores a number in milliseconds.
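+
+A non-normative Java sketch of packing such a value into the 12-byte fixed (the 
month, day and millisecond amounts are arbitrary):
+```java
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+public class DurationBytes {
+  public static void main(String[] args) {
+    int months = 1, days = 15, millis = 500;
+    // Three little-endian unsigned 32-bit integers: months, days, milliseconds.
+    byte[] duration = ByteBuffer.allocate(12)
+        .order(ByteOrder.LITTLE_ENDIAN)
+        .putInt(months)
+        .putInt(days)
+        .putInt(millis)
+        .array();
+    System.out.println(duration.length); // 12
+  }
+}
+```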
diff --git a/doc/content/en/docs/1.11.4/_index.md 
b/doc/content/en/docs/1.11.4/_index.md
new file mode 100755
index 000000000..f26bc558f
--- /dev/null
+++ b/doc/content/en/docs/1.11.4/_index.md
@@ -0,0 +1,59 @@
+---
+title: "Apache Avro™ 1.11.4 Documentation"
+linkTitle: "1.11.4"
+type: docs
+weight: -1113
+---
+
+<!--
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+   https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+
+-->
+
+## Introduction
+
+Apache Avro™ is a data serialization system.
+
+Avro provides:
+
+* Rich data structures.
+* A compact, fast, binary data format.
+* A container file, to store persistent data.
+* Remote procedure call (RPC).
+* Simple integration with dynamic languages. Code generation is not required 
to read or write data files nor to use or implement RPC protocols. Code 
generation is an optional optimization, only worth implementing for statically 
typed languages.
+
+## Schemas
+
+Avro relies on schemas. When Avro data is read, the schema used when writing 
it is always present. This permits each datum to be written with no per-value 
overheads, making serialization both fast and small. This also facilitates use 
with dynamic, scripting languages, since data, together with its schema, is 
fully self-describing.
+
+When Avro data is stored in a file, its schema is stored with it, so that 
files may be processed later by any program. If the program reading the data 
expects a different schema, this can be easily resolved, since both schemas are 
present.
+
+When Avro is used in RPC, the client and server exchange schemas in the 
connection handshake. (This can be optimized so that, for most calls, no 
schemas are actually transmitted.) Since client and server both have the 
other's full schema, correspondence between same-named fields, missing fields, 
extra fields, etc. can all be easily resolved.
+
+Avro schemas are defined with JSON. This facilitates implementation in 
languages that already have JSON libraries.
+
+## Comparison with other systems
+
+Avro provides functionality similar to systems such as 
[Thrift](https://thrift.apache.org/), [Protocol 
Buffers](https://code.google.com/p/protobuf/), etc. Avro differs from these 
systems in the following fundamental aspects.
+
+* Dynamic typing: Avro does not require that code be generated. Data is always 
accompanied by a schema that permits full processing of that data without code 
generation, static datatypes, etc. This facilitates construction of generic 
data-processing systems and languages.
+* Untagged data: Since the schema is present when data is read, considerably 
less type information need be encoded with data, resulting in smaller 
serialization size.
+* No manually-assigned field IDs: When a schema changes, both the old and new 
schema are always present when processing data, so differences may be resolved 
symbolically, using field names.
+
+
diff --git a/doc/content/en/docs/1.11.4/api-c++.md 
b/doc/content/en/docs/1.11.4/api-c++.md
new file mode 100644
index 000000000..110508ceb
--- /dev/null
+++ b/doc/content/en/docs/1.11.4/api-c++.md
@@ -0,0 +1,29 @@
+---
+title: "C++ API"
+linkTitle: "C++ API"
+weight: 102
+manualLink: /docs/1.11.4/api/cpp/html/
+---
+
+<!--
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+   https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+
+-->
+
+The C++ API documentation can be found <a 
href="/docs/1.11.4/api/cpp/html/">here</a>.
diff --git a/doc/content/en/docs/1.11.4/api-c.md 
b/doc/content/en/docs/1.11.4/api-c.md
new file mode 100644
index 000000000..91b20b3ba
--- /dev/null
+++ b/doc/content/en/docs/1.11.4/api-c.md
@@ -0,0 +1,29 @@
+---
+title: "C API"
+linkTitle: "C API"
+weight: 101
+manualLink: /docs/1.11.4/api/c/
+---
+
+<!--
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+   https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+
+-->
+
+The C API documentation can be found <a href="/docs/1.11.4/api/c/">here</a>.
diff --git a/doc/content/en/docs/1.11.4/api-csharp.md 
b/doc/content/en/docs/1.11.4/api-csharp.md
new file mode 100644
index 000000000..ca56d9189
--- /dev/null
+++ b/doc/content/en/docs/1.11.4/api-csharp.md
@@ -0,0 +1,29 @@
+---
+title: "C# API"
+linkTitle: "C# API"
+weight: 103
+manualLink: /docs/1.11.4/api/csharp/html/
+---
+
+<!--
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+   https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+
+-->
+
+The C# API documentation can be found <a 
href="/docs/1.11.4/api/csharp/html/">here</a>.
diff --git a/doc/content/en/docs/1.11.4/api-java.md 
b/doc/content/en/docs/1.11.4/api-java.md
new file mode 100644
index 000000000..1dc9568b5
--- /dev/null
+++ b/doc/content/en/docs/1.11.4/api-java.md
@@ -0,0 +1,29 @@
+---
+title: "Java API"
+linkTitle: "Java API"
+weight: 100
+manualLink: /docs/1.11.4/api/java/
+---
+
+<!--
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+   https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+
+-->
+
+The Javadocs can be found <a href="/docs/1.11.4/api/java/">here</a>.
diff --git a/doc/content/en/docs/1.11.4/api-py.md 
b/doc/content/en/docs/1.11.4/api-py.md
new file mode 100644
index 000000000..c43ec8ce3
--- /dev/null
+++ b/doc/content/en/docs/1.11.4/api-py.md
@@ -0,0 +1,29 @@
+---
+title: "Python API"
+linkTitle: "Python API"
+weight: 104
+manualLink: /docs/1.11.4/api/py/html/
+---
+
+<!--
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+   https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+
+-->
+
+The Python API documentation can be found <a 
href="/docs/1.11.4/api/py/html/">here</a>.
diff --git a/doc/content/en/docs/1.11.4/logo.svg 
b/doc/content/en/docs/1.11.4/logo.svg
new file mode 100644
index 000000000..b44ed1972
--- /dev/null
+++ b/doc/content/en/docs/1.11.4/logo.svg
@@ -0,0 +1,22 @@
+<?xml version="1.0" standalone="yes"?>
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+-->
+<svg id="svg" version="1.1" xmlns="http://www.w3.org/2000/svg"; 
xmlns:xlink="http://www.w3.org/1999/xlink"; width="400" 
height="124.32432432432432" viewBox="0, 0, 400,124.32432432432432"><g 
id="svgg"><path id="path0" d="M0.000 2.833 C 0.000 4.494,0.002 4.500,0.411 
4.500 C 0.970 4.500,1.412 5.451,1.737 7.355 C 1.794 7.687,2.038 8.172,2.279 
8.432 C 2.520 8.692,2.859 9.225,3.032 9.617 C 3.205 10.008,3.500 10.411,3.688 
10.511 C 3.875 10.611,4.128 10.932,4.250 11.224 C 4.372 11.515,4.710 12.004 
[...]
diff --git a/doc/examples/java-example/pom.xml 
b/doc/examples/java-example/pom.xml
index 66502ed0b..0b3f90fdd 100644
--- a/doc/examples/java-example/pom.xml
+++ b/doc/examples/java-example/pom.xml
@@ -26,7 +26,7 @@
   <name>java-example</name>
   <url>https://maven.apache.org</url>
   <properties>
-    <avro.version>1.11.3</avro.version>
+    <avro.version>1.12.0</avro.version>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
   </properties>
   <dependencies>
diff --git a/doc/examples/mr-example/pom.xml b/doc/examples/mr-example/pom.xml
index be6b689b0..f5bb873be 100644
--- a/doc/examples/mr-example/pom.xml
+++ b/doc/examples/mr-example/pom.xml
@@ -28,7 +28,7 @@
   <name>mr-example</name>
 
   <properties>
-    <avro.version>1.11.3</avro.version>
+    <avro.version>1.12.0</avro.version>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
   </properties>
 

