commit e6541f2667
@@ -0,0 +1,8 @@
# Default ignored files
/shelf/
/workspace.xml
# Editor-based HTTP Client requests
/httpRequests/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml
@@ -0,0 +1,9 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="WEB_MODULE" version="4">
  <component name="Go" enabled="true" />
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$" />
    <orderEntry type="inheritedJdk" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
</module>
@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/education-main.iml" filepath="$PROJECT_DIR$/.idea/education-main.iml" />
    </modules>
  </component>
</project>
@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@@ -0,0 +1,20 @@
# education
Set `GOPATH` to `/root/go`, then clone the project:
```
cd $GOPATH/src && git clone https://github.com/sxguan/education.git
```
Add the following entries to `/etc/hosts`:
```
127.0.0.1 orderer.example.com
127.0.0.1 peer0.org1.example.com
127.0.0.1 peer1.org1.example.com
```
Fetch the dependencies:
```
cd education && go mod tidy
```
Run the project:
```
./clean_docker.sh
```
Then open `127.0.0.1:9000` in a browser to access the application.
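Editor's note: once `./clean_docker.sh` finishes, the front end described in this README should answer on `127.0.0.1:9000`. The snippet below is a minimal smoke-test sketch using only the Go standard library; the file name, timeout, and printed messages are illustrative assumptions, not part of the project.
```
package main

// Smoke test: checks that the education web front end from the README
// (assumed to listen on 127.0.0.1:9000) responds to a plain HTTP GET.

import (
    "fmt"
    "net/http"
    "time"
)

func main() {
    client := &http.Client{Timeout: 5 * time.Second}
    resp, err := client.Get("http://127.0.0.1:9000")
    if err != nil {
        fmt.Println("service not reachable:", err)
        return
    }
    defer resp.Body.Close()
    fmt.Println("service responded:", resp.Status)
}
```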
@@ -0,0 +1,363 @@
package main

import (
    "bytes"
    "encoding/json"
    "fmt"

    "github.com/hyperledger/fabric-chaincode-go/shim"
    "github.com/hyperledger/fabric-protos-go/peer"
)

// Education is the ledger record describing one person's educational background.
type Education struct {
    ObjectType string `json:"docType"`
    Name       string `json:"Name"`     // name
    Gender     string `json:"Gender"`   // gender
    Nation     string `json:"Nation"`   // ethnicity
    EntityID   string `json:"EntityID"` // ID card number
    Place      string `json:"Place"`    // place of origin
    BirthDay   string `json:"BirthDay"` // date of birth

    EnrollDate     string `json:"EnrollDate"`     // enrollment date
    GraduationDate string `json:"GraduationDate"` // graduation (completion) date
    SchoolName     string `json:"SchoolName"`     // school name
    Major          string `json:"Major"`          // major
    QuaType        string `json:"QuaType"`        // qualification type
    Length         string `json:"Length"`         // length of schooling
    Mode           string `json:"Mode"`           // mode of study
    Level          string `json:"Level"`          // level
    Graduation     string `json:"Graduation"`     // graduation (completion) status
    CertNo         string `json:"CertNo"`         // certificate number

    Photo string `json:"Photo"` // photo

    Historys []HistoryItem // history records of this education entry
}

// HistoryItem pairs a transaction ID with the Education value it wrote.
type HistoryItem struct {
    TxId      string
    Education Education
}

type EducationChaincode struct {
}

func (t *EducationChaincode) Init(stub shim.ChaincodeStubInterface) peer.Response {
    fmt.Println(" ==== Init ====")

    return shim.Success(nil)
}

func (t *EducationChaincode) Invoke(stub shim.ChaincodeStubInterface) peer.Response {
    // Work out which function the caller wants to invoke.
    fun, args := stub.GetFunctionAndParameters()

    if fun == "addEdu" {
        return t.addEdu(stub, args) // add a record
    } else if fun == "queryEduByCertNoAndName" {
        return t.queryEduByCertNoAndName(stub, args) // query by certificate number and name
    } else if fun == "queryEduInfoByEntityID" {
        return t.queryEduInfoByEntityID(stub, args) // query details (with history) by ID card number
    } else if fun == "updateEdu" {
        return t.updateEdu(stub, args) // update a record
    } else if fun == "delEdu" {
        return t.delEdu(stub, args) // delete a record
    }

    return shim.Error("指定的函数名称错误")
}

const DOC_TYPE = "eduObj"

// PutEdu serializes an Education record and stores it in world state.
// args: education
func PutEdu(stub shim.ChaincodeStubInterface, edu Education) ([]byte, bool) {
    edu.ObjectType = DOC_TYPE

    b, err := json.Marshal(edu)
    if err != nil {
        return nil, false
    }

    // Store the record under the ID card number.
    err = stub.PutState(edu.EntityID, b)
    if err != nil {
        return nil, false
    }

    return b, true
}

// GetEduInfo looks up a record by ID card number.
// args: entityID
func GetEduInfo(stub shim.ChaincodeStubInterface, entityID string) (Education, bool) {
    var edu Education
    // Query the current state by ID card number.
    b, err := stub.GetState(entityID)
    if err != nil {
        return edu, false
    }

    if b == nil {
        return edu, false
    }

    // Deserialize the stored state.
    err = json.Unmarshal(b, &edu)
    if err != nil {
        return edu, false
    }

    // Return the result.
    return edu, true
}

// getEduByQueryString runs a rich query with the given selector string.
func getEduByQueryString(stub shim.ChaincodeStubInterface, queryString string) ([]byte, error) {
    resultsIterator, err := stub.GetQueryResult(queryString)
    if err != nil {
        return nil, err
    }
    defer resultsIterator.Close()

    // buffer is a JSON array containing QueryRecords
    var buffer bytes.Buffer

    bArrayMemberAlreadyWritten := false
    for resultsIterator.HasNext() {
        queryResponse, err := resultsIterator.Next()
        if err != nil {
            return nil, err
        }
        // Add a comma before array members, suppress it for the first array member
        if bArrayMemberAlreadyWritten {
            buffer.WriteString(",")
        }

        // Record is a JSON object, so we write as-is
        buffer.WriteString(string(queryResponse.Value))
        bArrayMemberAlreadyWritten = true
    }

    fmt.Printf("- getQueryResultForQueryString queryResult:\n%s\n", buffer.String())

    return buffer.Bytes(), nil
}

// addEdu adds a record.
// args: educationObject (JSON), eventName
// The ID card number is the key and the Education object is the value.
func (t *EducationChaincode) addEdu(stub shim.ChaincodeStubInterface, args []string) peer.Response {
    if len(args) != 2 {
        return shim.Error("给定的参数个数不符合要求")
    }

    var edu Education
    err := json.Unmarshal([]byte(args[0]), &edu)
    if err != nil {
        return shim.Error("反序列化信息时发生错误")
    }

    // Duplicate check: the ID card number must be unique.
    _, exist := GetEduInfo(stub, edu.EntityID)
    if exist {
        return shim.Error("要添加的身份证号码已存在")
    }

    _, bl := PutEdu(stub, edu)
    if !bl {
        return shim.Error("保存信息时发生错误")
    }

    err = stub.SetEvent(args[1], []byte{})
    if err != nil {
        return shim.Error(err.Error())
    }

    return shim.Success([]byte("信息添加成功"))
}

// queryEduByCertNoAndName queries a record by certificate number and name.
// args: CertNo, name
func (t *EducationChaincode) queryEduByCertNoAndName(stub shim.ChaincodeStubInterface, args []string) peer.Response {
    if len(args) != 2 {
        return shim.Error("给定的参数个数不符合要求")
    }
    CertNo := args[0]
    name := args[1]

    // Build the CouchDB selector (a plain JSON document).
    // queryString := fmt.Sprintf("{\"selector\":{\"docType\":\"eduObj\", \"CertNo\":\"%s\"}}", CertNo)
    queryString := fmt.Sprintf("{\"selector\":{\"docType\":\"%s\", \"CertNo\":\"%s\", \"Name\":\"%s\"}}", DOC_TYPE, CertNo, name)

    // Run the query.
    result, err := getEduByQueryString(stub, queryString)
    if err != nil {
        return shim.Error("根据证书编号及姓名查询信息时发生错误")
    }
    if result == nil {
        return shim.Error("根据指定的证书编号及姓名没有查询到相关的信息")
    }
    return shim.Success(result)
}

// queryEduInfoByEntityID queries the details of a record, including its change history, by ID card number.
// args: entityID
func (t *EducationChaincode) queryEduInfoByEntityID(stub shim.ChaincodeStubInterface, args []string) peer.Response {
    if len(args) != 1 {
        return shim.Error("给定的参数个数不符合要求")
    }

    // Query the current state by ID card number.
    b, err := stub.GetState(args[0])
    if err != nil {
        return shim.Error("根据身份证号码查询信息失败")
    }

    if b == nil {
        return shim.Error("根据身份证号码没有查询到相关的信息")
    }

    // Deserialize the stored state.
    var edu Education
    err = json.Unmarshal(b, &edu)
    if err != nil {
        return shim.Error("反序列化edu信息失败")
    }

    // Fetch the history of changes for this key.
    iterator, err := stub.GetHistoryForKey(edu.EntityID)
    if err != nil {
        return shim.Error("根据指定的身份证号码查询对应的历史变更数据失败")
    }
    defer iterator.Close()

    // Iterate over the history entries.
    var historys []HistoryItem
    var hisEdu Education
    for iterator.HasNext() {
        hisData, err := iterator.Next()
        if err != nil {
            return shim.Error("获取edu的历史变更数据失败")
        }

        var historyItem HistoryItem
        historyItem.TxId = hisData.TxId

        if hisData.Value == nil {
            var empty Education
            historyItem.Education = empty
        } else {
            json.Unmarshal(hisData.Value, &hisEdu)
            historyItem.Education = hisEdu
        }

        historys = append(historys, historyItem)
    }

    edu.Historys = historys

    // Return the aggregated record.
    result, err := json.Marshal(edu)
    if err != nil {
        return shim.Error("序列化edu信息时发生错误")
    }
    return shim.Success(result)
}

// updateEdu updates a record, keyed by ID card number.
// args: educationObject (JSON), eventName
func (t *EducationChaincode) updateEdu(stub shim.ChaincodeStubInterface, args []string) peer.Response {
    if len(args) != 2 {
        return shim.Error("给定的参数个数不符合要求")
    }

    var info Education
    err := json.Unmarshal([]byte(args[0]), &info)
    if err != nil {
        return shim.Error("反序列化edu信息失败")
    }

    // Look up the existing record by ID card number.
    result, bl := GetEduInfo(stub, info.EntityID)
    if !bl {
        return shim.Error("根据身份证号码查询信息时发生错误")
    }

    result.Name = info.Name
    result.BirthDay = info.BirthDay
    result.Nation = info.Nation
    result.Gender = info.Gender
    result.Place = info.Place
    result.EntityID = info.EntityID
    result.Photo = info.Photo

    result.EnrollDate = info.EnrollDate
    result.GraduationDate = info.GraduationDate
    result.SchoolName = info.SchoolName
    result.Major = info.Major
    result.QuaType = info.QuaType
    result.Length = info.Length
    result.Mode = info.Mode
    result.Level = info.Level
    result.Graduation = info.Graduation
    result.CertNo = info.CertNo

    _, bl = PutEdu(stub, result)
    if !bl {
        return shim.Error("保存信息时发生错误")
    }

    err = stub.SetEvent(args[1], []byte{})
    if err != nil {
        return shim.Error(err.Error())
    }

    return shim.Success([]byte("信息更新成功"))
}

// delEdu deletes a record by ID card number (not exposed for now).
// args: entityID, eventName
func (t *EducationChaincode) delEdu(stub shim.ChaincodeStubInterface, args []string) peer.Response {
    if len(args) != 2 {
        return shim.Error("给定的参数个数不符合要求")
    }

    /*var edu Education
    result, bl := GetEduInfo(stub, info.EntityID)
    err := json.Unmarshal(result, &edu)
    if err != nil {
        return shim.Error("反序列化信息时发生错误")
    }*/

    err := stub.DelState(args[0])
    if err != nil {
        return shim.Error("删除信息时发生错误")
    }

    err = stub.SetEvent(args[1], []byte{})
    if err != nil {
        return shim.Error(err.Error())
    }

    return shim.Success([]byte("信息删除成功"))
}

func main() {
    err := shim.Start(new(EducationChaincode))
    if err != nil {
        fmt.Printf("启动EducationChaincode时发生错误: %s", err)
    }
}
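Editor's note: the chaincode above can be exercised without a running network. Below is a minimal unit-test sketch; it assumes the `shimtest` mock stub that ships with the `fabric-chaincode-go` module already required in go.mod, and the test file name, sample field values, and event name are illustrative assumptions rather than part of this commit.
```
package main

import (
    "encoding/json"
    "testing"

    "github.com/hyperledger/fabric-chaincode-go/shimtest"
)

// TestAddEdu invokes addEdu through a mock stub and checks that the record
// ends up in world state under its EntityID key.
func TestAddEdu(t *testing.T) {
    stub := shimtest.NewMockStub("education", new(EducationChaincode))

    // args[0] is the Education JSON document, args[1] an event name.
    edu := Education{Name: "Alice", EntityID: "110101199001011234", CertNo: "1001"}
    payload, err := json.Marshal(edu)
    if err != nil {
        t.Fatalf("marshal: %v", err)
    }

    resp := stub.MockInvoke("tx1", [][]byte{[]byte("addEdu"), payload, []byte("addEduEvent")})
    if resp.Status != 200 {
        t.Fatalf("addEdu failed: %s", resp.Message)
    }

    if stub.State["110101199001011234"] == nil {
        t.Fatal("expected the record to be stored under its EntityID")
    }
}
```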
@@ -0,0 +1,8 @@
module github.com/kongyixueyuan.com/education/chaincode

go 1.15

require (
    github.com/hyperledger/fabric-chaincode-go v0.0.0-20201119163726-f8ef75b17719
    github.com/hyperledger/fabric-protos-go v0.0.0-20210127161553-4f432a78f286
)
@@ -0,0 +1,46 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/hyperledger/fabric-chaincode-go v0.0.0-20201119163726-f8ef75b17719 h1:FQ9AMLVSFt5QW2YBLraXW5V4Au6aFFpSl4xKFARM58Y=
github.com/hyperledger/fabric-chaincode-go v0.0.0-20201119163726-f8ef75b17719/go.mod h1:N7H3sA7Tx4k/YzFq7U0EPdqJtqvM4Kild0JoCc7C0Dc=
github.com/hyperledger/fabric-protos-go v0.0.0-20190919234611-2a87503ac7c9/go.mod h1:xVYTjK4DtZRBxZ2D9aE4y6AbLaPwue2o/criQyQbVD0=
github.com/hyperledger/fabric-protos-go v0.0.0-20210127161553-4f432a78f286 h1:cFLrvWUprlCbVixFkaeONNlUtbsjv3c20ujb4RJFBl8=
github.com/hyperledger/fabric-protos-go v0.0.0-20210127161553-4f432a78f286/go.mod h1:xVYTjK4DtZRBxZ2D9aE4y6AbLaPwue2o/criQyQbVD0=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092 h1:4QSRKanuywn15aTZvI/mIDEgPQpswuFndXpOj3rKEco=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190710143415-6ec70d6a5542 h1:6ZQFf1D2YYDDI7eSwW8adlkkavTB9sw5I24FVtEvNUQ=
golang.org/x/sys v0.0.0-20190710143415-6ec70d6a5542/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b h1:lohp5blsw53GBXtLyLNaTXPXS9pJ1tiTw61ZHUoE9Qw=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.
@@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.
@@ -0,0 +1,28 @@
Copyright 2010 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -0,0 +1,253 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Protocol buffer deep copy and merge.
|
||||
// TODO: RawMessage.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Clone returns a deep copy of a protocol buffer.
|
||||
func Clone(src Message) Message {
|
||||
in := reflect.ValueOf(src)
|
||||
if in.IsNil() {
|
||||
return src
|
||||
}
|
||||
out := reflect.New(in.Type().Elem())
|
||||
dst := out.Interface().(Message)
|
||||
Merge(dst, src)
|
||||
return dst
|
||||
}
|
||||
|
||||
// Merger is the interface representing objects that can merge messages of the same type.
|
||||
type Merger interface {
|
||||
// Merge merges src into this message.
|
||||
// Required and optional fields that are set in src will be set to that value in dst.
|
||||
// Elements of repeated fields will be appended.
|
||||
//
|
||||
// Merge may panic if called with a different argument type than the receiver.
|
||||
Merge(src Message)
|
||||
}
|
||||
|
||||
// generatedMerger is the custom merge method that generated protos will have.
|
||||
// We must add this method since a generate Merge method will conflict with
|
||||
// many existing protos that have a Merge data field already defined.
|
||||
type generatedMerger interface {
|
||||
XXX_Merge(src Message)
|
||||
}
|
||||
|
||||
// Merge merges src into dst.
|
||||
// Required and optional fields that are set in src will be set to that value in dst.
|
||||
// Elements of repeated fields will be appended.
|
||||
// Merge panics if src and dst are not the same type, or if dst is nil.
|
||||
func Merge(dst, src Message) {
|
||||
if m, ok := dst.(Merger); ok {
|
||||
m.Merge(src)
|
||||
return
|
||||
}
|
||||
|
||||
in := reflect.ValueOf(src)
|
||||
out := reflect.ValueOf(dst)
|
||||
if out.IsNil() {
|
||||
panic("proto: nil destination")
|
||||
}
|
||||
if in.Type() != out.Type() {
|
||||
panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
|
||||
}
|
||||
if in.IsNil() {
|
||||
return // Merge from nil src is a noop
|
||||
}
|
||||
if m, ok := dst.(generatedMerger); ok {
|
||||
m.XXX_Merge(src)
|
||||
return
|
||||
}
|
||||
mergeStruct(out.Elem(), in.Elem())
|
||||
}
|
||||
|
||||
func mergeStruct(out, in reflect.Value) {
|
||||
sprop := GetProperties(in.Type())
|
||||
for i := 0; i < in.NumField(); i++ {
|
||||
f := in.Type().Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
|
||||
}
|
||||
|
||||
if emIn, err := extendable(in.Addr().Interface()); err == nil {
|
||||
emOut, _ := extendable(out.Addr().Interface())
|
||||
mIn, muIn := emIn.extensionsRead()
|
||||
if mIn != nil {
|
||||
mOut := emOut.extensionsWrite()
|
||||
muIn.Lock()
|
||||
mergeExtension(mOut, mIn)
|
||||
muIn.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
uf := in.FieldByName("XXX_unrecognized")
|
||||
if !uf.IsValid() {
|
||||
return
|
||||
}
|
||||
uin := uf.Bytes()
|
||||
if len(uin) > 0 {
|
||||
out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
|
||||
}
|
||||
}
|
||||
|
||||
// mergeAny performs a merge between two values of the same type.
|
||||
// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
|
||||
// prop is set if this is a struct field (it may be nil).
|
||||
func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
|
||||
if in.Type() == protoMessageType {
|
||||
if !in.IsNil() {
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
|
||||
} else {
|
||||
Merge(out.Interface().(Message), in.Interface().(Message))
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
switch in.Kind() {
|
||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
|
||||
reflect.String, reflect.Uint32, reflect.Uint64:
|
||||
if !viaPtr && isProto3Zero(in) {
|
||||
return
|
||||
}
|
||||
out.Set(in)
|
||||
case reflect.Interface:
|
||||
// Probably a oneof field; copy non-nil values.
|
||||
if in.IsNil() {
|
||||
return
|
||||
}
|
||||
// Allocate destination if it is not set, or set to a different type.
|
||||
// Otherwise we will merge as normal.
|
||||
if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
|
||||
out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
|
||||
}
|
||||
mergeAny(out.Elem(), in.Elem(), false, nil)
|
||||
case reflect.Map:
|
||||
if in.Len() == 0 {
|
||||
return
|
||||
}
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.MakeMap(in.Type()))
|
||||
}
|
||||
// For maps with value types of *T or []byte we need to deep copy each value.
|
||||
elemKind := in.Type().Elem().Kind()
|
||||
for _, key := range in.MapKeys() {
|
||||
var val reflect.Value
|
||||
switch elemKind {
|
||||
case reflect.Ptr:
|
||||
val = reflect.New(in.Type().Elem().Elem())
|
||||
mergeAny(val, in.MapIndex(key), false, nil)
|
||||
case reflect.Slice:
|
||||
val = in.MapIndex(key)
|
||||
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
|
||||
default:
|
||||
val = in.MapIndex(key)
|
||||
}
|
||||
out.SetMapIndex(key, val)
|
||||
}
|
||||
case reflect.Ptr:
|
||||
if in.IsNil() {
|
||||
return
|
||||
}
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.New(in.Elem().Type()))
|
||||
}
|
||||
mergeAny(out.Elem(), in.Elem(), true, nil)
|
||||
case reflect.Slice:
|
||||
if in.IsNil() {
|
||||
return
|
||||
}
|
||||
if in.Type().Elem().Kind() == reflect.Uint8 {
|
||||
// []byte is a scalar bytes field, not a repeated field.
|
||||
|
||||
// Edge case: if this is in a proto3 message, a zero length
|
||||
// bytes field is considered the zero value, and should not
|
||||
// be merged.
|
||||
if prop != nil && prop.proto3 && in.Len() == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Make a deep copy.
|
||||
// Append to []byte{} instead of []byte(nil) so that we never end up
|
||||
// with a nil result.
|
||||
out.SetBytes(append([]byte{}, in.Bytes()...))
|
||||
return
|
||||
}
|
||||
n := in.Len()
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.MakeSlice(in.Type(), 0, n))
|
||||
}
|
||||
switch in.Type().Elem().Kind() {
|
||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
|
||||
reflect.String, reflect.Uint32, reflect.Uint64:
|
||||
out.Set(reflect.AppendSlice(out, in))
|
||||
default:
|
||||
for i := 0; i < n; i++ {
|
||||
x := reflect.Indirect(reflect.New(in.Type().Elem()))
|
||||
mergeAny(x, in.Index(i), false, nil)
|
||||
out.Set(reflect.Append(out, x))
|
||||
}
|
||||
}
|
||||
case reflect.Struct:
|
||||
mergeStruct(out, in)
|
||||
default:
|
||||
// unknown type, so not a protocol buffer
|
||||
log.Printf("proto: don't know how to copy %v", in)
|
||||
}
|
||||
}
|
||||
|
||||
func mergeExtension(out, in map[int32]Extension) {
|
||||
for extNum, eIn := range in {
|
||||
eOut := Extension{desc: eIn.desc}
|
||||
if eIn.value != nil {
|
||||
v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
|
||||
mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
|
||||
eOut.value = v.Interface()
|
||||
}
|
||||
if eIn.enc != nil {
|
||||
eOut.enc = make([]byte, len(eIn.enc))
|
||||
copy(eOut.enc, eIn.enc)
|
||||
}
|
||||
|
||||
out[extNum] = eOut
|
||||
}
|
||||
}
|
@@ -0,0 +1,427 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Routines for decoding protocol buffer data to construct in-memory representations.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// errOverflow is returned when an integer is too large to be represented.
|
||||
var errOverflow = errors.New("proto: integer overflow")
|
||||
|
||||
// ErrInternalBadWireType is returned by generated code when an incorrect
|
||||
// wire type is encountered. It does not get returned to user code.
|
||||
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
|
||||
|
||||
// DecodeVarint reads a varint-encoded integer from the slice.
|
||||
// It returns the integer and the number of bytes consumed, or
|
||||
// zero if there is not enough.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func DecodeVarint(buf []byte) (x uint64, n int) {
|
||||
for shift := uint(0); shift < 64; shift += 7 {
|
||||
if n >= len(buf) {
|
||||
return 0, 0
|
||||
}
|
||||
b := uint64(buf[n])
|
||||
n++
|
||||
x |= (b & 0x7F) << shift
|
||||
if (b & 0x80) == 0 {
|
||||
return x, n
|
||||
}
|
||||
}
|
||||
|
||||
// The number is too large to represent in a 64-bit value.
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
|
||||
i := p.index
|
||||
l := len(p.buf)
|
||||
|
||||
for shift := uint(0); shift < 64; shift += 7 {
|
||||
if i >= l {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
b := p.buf[i]
|
||||
i++
|
||||
x |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
p.index = i
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// The number is too large to represent in a 64-bit value.
|
||||
err = errOverflow
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeVarint reads a varint-encoded integer from the Buffer.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func (p *Buffer) DecodeVarint() (x uint64, err error) {
|
||||
i := p.index
|
||||
buf := p.buf
|
||||
|
||||
if i >= len(buf) {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
} else if buf[i] < 0x80 {
|
||||
p.index++
|
||||
return uint64(buf[i]), nil
|
||||
} else if len(buf)-i < 10 {
|
||||
return p.decodeVarintSlow()
|
||||
}
|
||||
|
||||
var b uint64
|
||||
// we already checked the first byte
|
||||
x = uint64(buf[i]) - 0x80
|
||||
i++
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 7
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 7
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 14
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 14
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 21
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 21
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 28
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 28
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 35
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 35
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 42
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 42
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 49
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 49
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 56
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 56
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 63
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
|
||||
return 0, errOverflow
|
||||
|
||||
done:
|
||||
p.index = i
|
||||
return x, nil
|
||||
}
|
||||
|
||||
// DecodeFixed64 reads a 64-bit integer from the Buffer.
|
||||
// This is the format for the
|
||||
// fixed64, sfixed64, and double protocol buffer types.
|
||||
func (p *Buffer) DecodeFixed64() (x uint64, err error) {
|
||||
// x, err already 0
|
||||
i := p.index + 8
|
||||
if i < 0 || i > len(p.buf) {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
p.index = i
|
||||
|
||||
x = uint64(p.buf[i-8])
|
||||
x |= uint64(p.buf[i-7]) << 8
|
||||
x |= uint64(p.buf[i-6]) << 16
|
||||
x |= uint64(p.buf[i-5]) << 24
|
||||
x |= uint64(p.buf[i-4]) << 32
|
||||
x |= uint64(p.buf[i-3]) << 40
|
||||
x |= uint64(p.buf[i-2]) << 48
|
||||
x |= uint64(p.buf[i-1]) << 56
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeFixed32 reads a 32-bit integer from the Buffer.
|
||||
// This is the format for the
|
||||
// fixed32, sfixed32, and float protocol buffer types.
|
||||
func (p *Buffer) DecodeFixed32() (x uint64, err error) {
|
||||
// x, err already 0
|
||||
i := p.index + 4
|
||||
if i < 0 || i > len(p.buf) {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
p.index = i
|
||||
|
||||
x = uint64(p.buf[i-4])
|
||||
x |= uint64(p.buf[i-3]) << 8
|
||||
x |= uint64(p.buf[i-2]) << 16
|
||||
x |= uint64(p.buf[i-1]) << 24
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
|
||||
// from the Buffer.
|
||||
// This is the format used for the sint64 protocol buffer type.
|
||||
func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
|
||||
x, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
|
||||
// from the Buffer.
|
||||
// This is the format used for the sint32 protocol buffer type.
|
||||
func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
|
||||
x, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
|
||||
// This is the format used for the bytes protocol buffer
|
||||
// type and for embedded messages.
|
||||
func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
|
||||
n, err := p.DecodeVarint()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nb := int(n)
|
||||
if nb < 0 {
|
||||
return nil, fmt.Errorf("proto: bad byte length %d", nb)
|
||||
}
|
||||
end := p.index + nb
|
||||
if end < p.index || end > len(p.buf) {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
if !alloc {
|
||||
// todo: check if can get more uses of alloc=false
|
||||
buf = p.buf[p.index:end]
|
||||
p.index += nb
|
||||
return
|
||||
}
|
||||
|
||||
buf = make([]byte, nb)
|
||||
copy(buf, p.buf[p.index:])
|
||||
p.index += nb
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeStringBytes reads an encoded string from the Buffer.
|
||||
// This is the format used for the proto2 string type.
|
||||
func (p *Buffer) DecodeStringBytes() (s string, err error) {
|
||||
buf, err := p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
// Unmarshaler is the interface representing objects that can
|
||||
// unmarshal themselves. The argument points to data that may be
|
||||
// overwritten, so implementations should not keep references to the
|
||||
// buffer.
|
||||
// Unmarshal implementations should not clear the receiver.
|
||||
// Any unmarshaled data should be merged into the receiver.
|
||||
// Callers of Unmarshal that do not want to retain existing data
|
||||
// should Reset the receiver before calling Unmarshal.
|
||||
type Unmarshaler interface {
|
||||
Unmarshal([]byte) error
|
||||
}
|
||||
|
||||
// newUnmarshaler is the interface representing objects that can
|
||||
// unmarshal themselves. The semantics are identical to Unmarshaler.
|
||||
//
|
||||
// This exists to support protoc-gen-go generated messages.
|
||||
// The proto package will stop type-asserting to this interface in the future.
|
||||
//
|
||||
// DO NOT DEPEND ON THIS.
|
||||
type newUnmarshaler interface {
|
||||
XXX_Unmarshal([]byte) error
|
||||
}
|
||||
|
||||
// Unmarshal parses the protocol buffer representation in buf and places the
|
||||
// decoded result in pb. If the struct underlying pb does not match
|
||||
// the data in buf, the results can be unpredictable.
|
||||
//
|
||||
// Unmarshal resets pb before starting to unmarshal, so any
|
||||
// existing data in pb is always removed. Use UnmarshalMerge
|
||||
// to preserve and append to existing data.
|
||||
func Unmarshal(buf []byte, pb Message) error {
|
||||
pb.Reset()
|
||||
if u, ok := pb.(newUnmarshaler); ok {
|
||||
return u.XXX_Unmarshal(buf)
|
||||
}
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
return u.Unmarshal(buf)
|
||||
}
|
||||
return NewBuffer(buf).Unmarshal(pb)
|
||||
}
|
||||
|
||||
// UnmarshalMerge parses the protocol buffer representation in buf and
|
||||
// writes the decoded result to pb. If the struct underlying pb does not match
|
||||
// the data in buf, the results can be unpredictable.
|
||||
//
|
||||
// UnmarshalMerge merges into existing data in pb.
|
||||
// Most code should use Unmarshal instead.
|
||||
func UnmarshalMerge(buf []byte, pb Message) error {
|
||||
if u, ok := pb.(newUnmarshaler); ok {
|
||||
return u.XXX_Unmarshal(buf)
|
||||
}
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
// NOTE: The history of proto have unfortunately been inconsistent
|
||||
// whether Unmarshaler should or should not implicitly clear itself.
|
||||
// Some implementations do, most do not.
|
||||
// Thus, calling this here may or may not do what people want.
|
||||
//
|
||||
// See https://github.com/golang/protobuf/issues/424
|
||||
return u.Unmarshal(buf)
|
||||
}
|
||||
return NewBuffer(buf).Unmarshal(pb)
|
||||
}
|
||||
|
||||
// DecodeMessage reads a count-delimited message from the Buffer.
|
||||
func (p *Buffer) DecodeMessage(pb Message) error {
|
||||
enc, err := p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return NewBuffer(enc).Unmarshal(pb)
|
||||
}
|
||||
|
||||
// DecodeGroup reads a tag-delimited group from the Buffer.
|
||||
// StartGroup tag is already consumed. This function consumes
|
||||
// EndGroup tag.
|
||||
func (p *Buffer) DecodeGroup(pb Message) error {
|
||||
b := p.buf[p.index:]
|
||||
x, y := findEndGroup(b)
|
||||
if x < 0 {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
err := Unmarshal(b[:x], pb)
|
||||
p.index += y
|
||||
return err
|
||||
}
|
||||
|
||||
// Unmarshal parses the protocol buffer representation in the
|
||||
// Buffer and places the decoded result in pb. If the struct
|
||||
// underlying pb does not match the data in the buffer, the results can be
|
||||
// unpredictable.
|
||||
//
|
||||
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
|
||||
func (p *Buffer) Unmarshal(pb Message) error {
|
||||
// If the object can unmarshal itself, let it.
|
||||
if u, ok := pb.(newUnmarshaler); ok {
|
||||
err := u.XXX_Unmarshal(p.buf[p.index:])
|
||||
p.index = len(p.buf)
|
||||
return err
|
||||
}
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
// NOTE: The history of proto have unfortunately been inconsistent
|
||||
// whether Unmarshaler should or should not implicitly clear itself.
|
||||
// Some implementations do, most do not.
|
||||
// Thus, calling this here may or may not do what people want.
|
||||
//
|
||||
// See https://github.com/golang/protobuf/issues/424
|
||||
err := u.Unmarshal(p.buf[p.index:])
|
||||
p.index = len(p.buf)
|
||||
return err
|
||||
}
|
||||
|
||||
// Slow workaround for messages that aren't Unmarshalers.
|
||||
// This includes some hand-coded .pb.go files and
|
||||
// bootstrap protos.
|
||||
// TODO: fix all of those and then add Unmarshal to
|
||||
// the Message interface. Then:
|
||||
// The cast above and code below can be deleted.
|
||||
// The old unmarshaler can be deleted.
|
||||
// Clients can call Unmarshal directly (can already do that, actually).
|
||||
var info InternalMessageInfo
|
||||
err := info.Unmarshal(pb, p.buf[p.index:])
|
||||
p.index = len(p.buf)
|
||||
return err
|
||||
}
|
@@ -0,0 +1,63 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
import "errors"
|
||||
|
||||
// Deprecated: do not use.
|
||||
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
|
||||
|
||||
// Deprecated: do not use.
|
||||
func GetStats() Stats { return Stats{} }
|
||||
|
||||
// Deprecated: do not use.
|
||||
func MarshalMessageSet(interface{}) ([]byte, error) {
|
||||
return nil, errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: do not use.
|
||||
func UnmarshalMessageSet([]byte, interface{}) error {
|
||||
return errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: do not use.
|
||||
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
|
||||
return nil, errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: do not use.
|
||||
func UnmarshalMessageSetJSON([]byte, interface{}) error {
|
||||
return errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: do not use.
|
||||
func RegisterMessageSetType(Message, int32, string) {}
|
@@ -0,0 +1,350 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
type generatedDiscarder interface {
|
||||
XXX_DiscardUnknown()
|
||||
}
|
||||
|
||||
// DiscardUnknown recursively discards all unknown fields from this message
|
||||
// and all embedded messages.
|
||||
//
|
||||
// When unmarshaling a message with unrecognized fields, the tags and values
|
||||
// of such fields are preserved in the Message. This allows a later call to
|
||||
// Marshal to produce a message that continues to have those
|
||||
// unrecognized fields. To avoid this, DiscardUnknown is used to
|
||||
// explicitly clear the unknown fields after unmarshaling.
|
||||
//
|
||||
// For proto2 messages, the unknown fields of message extensions are only
|
||||
// discarded from messages that have been accessed via GetExtension.
|
||||
func DiscardUnknown(m Message) {
|
||||
if m, ok := m.(generatedDiscarder); ok {
|
||||
m.XXX_DiscardUnknown()
|
||||
return
|
||||
}
|
||||
// TODO: Dynamically populate an InternalMessageInfo for legacy messages,
|
||||
// but the master branch has no implementation for InternalMessageInfo,
|
||||
// so it would be more work to replicate that approach.
|
||||
discardLegacy(m)
|
||||
}
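
A minimal usage sketch of DiscardUnknown from a client's point of view, assuming a hypothetical generated message type mypb.Test (error handling kept short):

	func stripUnknown(data []byte) ([]byte, error) {
		msg := &mypb.Test{} // mypb.Test is a hypothetical generated type
		if err := proto.Unmarshal(data, msg); err != nil {
			return nil, err
		}
		proto.DiscardUnknown(msg) // drops unknown fields here and in nested messages
		return proto.Marshal(msg) // the re-encoded bytes no longer carry them
	}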
|
||||
|
||||
// DiscardUnknown recursively discards all unknown fields.
|
||||
func (a *InternalMessageInfo) DiscardUnknown(m Message) {
|
||||
di := atomicLoadDiscardInfo(&a.discard)
|
||||
if di == nil {
|
||||
di = getDiscardInfo(reflect.TypeOf(m).Elem())
|
||||
atomicStoreDiscardInfo(&a.discard, di)
|
||||
}
|
||||
di.discard(toPointer(&m))
|
||||
}
|
||||
|
||||
type discardInfo struct {
|
||||
typ reflect.Type
|
||||
|
||||
initialized int32 // 0: only typ is valid, 1: everything is valid
|
||||
lock sync.Mutex
|
||||
|
||||
fields []discardFieldInfo
|
||||
unrecognized field
|
||||
}
|
||||
|
||||
type discardFieldInfo struct {
|
||||
field field // Offset of field, guaranteed to be valid
|
||||
discard func(src pointer)
|
||||
}
|
||||
|
||||
var (
|
||||
discardInfoMap = map[reflect.Type]*discardInfo{}
|
||||
discardInfoLock sync.Mutex
|
||||
)
|
||||
|
||||
func getDiscardInfo(t reflect.Type) *discardInfo {
|
||||
discardInfoLock.Lock()
|
||||
defer discardInfoLock.Unlock()
|
||||
di := discardInfoMap[t]
|
||||
if di == nil {
|
||||
di = &discardInfo{typ: t}
|
||||
discardInfoMap[t] = di
|
||||
}
|
||||
return di
|
||||
}
|
||||
|
||||
func (di *discardInfo) discard(src pointer) {
|
||||
if src.isNil() {
|
||||
return // Nothing to do.
|
||||
}
|
||||
|
||||
if atomic.LoadInt32(&di.initialized) == 0 {
|
||||
di.computeDiscardInfo()
|
||||
}
|
||||
|
||||
for _, fi := range di.fields {
|
||||
sfp := src.offset(fi.field)
|
||||
fi.discard(sfp)
|
||||
}
|
||||
|
||||
// For proto2 messages, only discard unknown fields in message extensions
|
||||
// that have been accessed via GetExtension.
|
||||
if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
|
||||
// Ignore lock since DiscardUnknown is not concurrency safe.
|
||||
emm, _ := em.extensionsRead()
|
||||
for _, mx := range emm {
|
||||
if m, ok := mx.value.(Message); ok {
|
||||
DiscardUnknown(m)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if di.unrecognized.IsValid() {
|
||||
*src.offset(di.unrecognized).toBytes() = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (di *discardInfo) computeDiscardInfo() {
|
||||
di.lock.Lock()
|
||||
defer di.lock.Unlock()
|
||||
if di.initialized != 0 {
|
||||
return
|
||||
}
|
||||
t := di.typ
|
||||
n := t.NumField()
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
f := t.Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
|
||||
dfi := discardFieldInfo{field: toField(&f)}
|
||||
tf := f.Type
|
||||
|
||||
// Unwrap tf to get its most basic type.
|
||||
var isPointer, isSlice bool
|
||||
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
|
||||
isSlice = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if tf.Kind() == reflect.Ptr {
|
||||
isPointer = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if isPointer && isSlice && tf.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
|
||||
}
|
||||
|
||||
switch tf.Kind() {
|
||||
case reflect.Struct:
|
||||
switch {
|
||||
case !isPointer:
|
||||
panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
|
||||
case isSlice: // E.g., []*pb.T
|
||||
di := getDiscardInfo(tf)
|
||||
dfi.discard = func(src pointer) {
|
||||
sps := src.getPointerSlice()
|
||||
for _, sp := range sps {
|
||||
if !sp.isNil() {
|
||||
di.discard(sp)
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., *pb.T
|
||||
di := getDiscardInfo(tf)
|
||||
dfi.discard = func(src pointer) {
|
||||
sp := src.getPointer()
|
||||
if !sp.isNil() {
|
||||
di.discard(sp)
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Map:
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
|
||||
default: // E.g., map[K]V
|
||||
if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
|
||||
dfi.discard = func(src pointer) {
|
||||
sm := src.asPointerTo(tf).Elem()
|
||||
if sm.Len() == 0 {
|
||||
return
|
||||
}
|
||||
for _, key := range sm.MapKeys() {
|
||||
val := sm.MapIndex(key)
|
||||
DiscardUnknown(val.Interface().(Message))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
dfi.discard = func(pointer) {} // Noop
|
||||
}
|
||||
}
|
||||
case reflect.Interface:
|
||||
// Must be oneof field.
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
|
||||
default: // E.g., interface{}
|
||||
// TODO: Make this faster?
|
||||
dfi.discard = func(src pointer) {
|
||||
su := src.asPointerTo(tf).Elem()
|
||||
if !su.IsNil() {
|
||||
sv := su.Elem().Elem().Field(0)
|
||||
if sv.Kind() == reflect.Ptr && sv.IsNil() {
|
||||
return
|
||||
}
|
||||
switch sv.Type().Kind() {
|
||||
case reflect.Ptr: // Proto struct (e.g., *T)
|
||||
DiscardUnknown(sv.Interface().(Message))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
continue
|
||||
}
|
||||
di.fields = append(di.fields, dfi)
|
||||
}
|
||||
|
||||
di.unrecognized = invalidField
|
||||
if f, ok := t.FieldByName("XXX_unrecognized"); ok {
|
||||
if f.Type != reflect.TypeOf([]byte{}) {
|
||||
panic("expected XXX_unrecognized to be of type []byte")
|
||||
}
|
||||
di.unrecognized = toField(&f)
|
||||
}
|
||||
|
||||
atomic.StoreInt32(&di.initialized, 1)
|
||||
}
|
||||
|
||||
func discardLegacy(m Message) {
|
||||
v := reflect.ValueOf(m)
|
||||
if v.Kind() != reflect.Ptr || v.IsNil() {
|
||||
return
|
||||
}
|
||||
v = v.Elem()
|
||||
if v.Kind() != reflect.Struct {
|
||||
return
|
||||
}
|
||||
t := v.Type()
|
||||
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
vf := v.Field(i)
|
||||
tf := f.Type
|
||||
|
||||
// Unwrap tf to get its most basic type.
|
||||
var isPointer, isSlice bool
|
||||
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
|
||||
isSlice = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if tf.Kind() == reflect.Ptr {
|
||||
isPointer = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if isPointer && isSlice && tf.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
|
||||
}
|
||||
|
||||
switch tf.Kind() {
|
||||
case reflect.Struct:
|
||||
switch {
|
||||
case !isPointer:
|
||||
panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
|
||||
case isSlice: // E.g., []*pb.T
|
||||
for j := 0; j < vf.Len(); j++ {
|
||||
discardLegacy(vf.Index(j).Interface().(Message))
|
||||
}
|
||||
default: // E.g., *pb.T
|
||||
discardLegacy(vf.Interface().(Message))
|
||||
}
|
||||
case reflect.Map:
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
|
||||
default: // E.g., map[K]V
|
||||
tv := vf.Type().Elem()
|
||||
if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
|
||||
for _, key := range vf.MapKeys() {
|
||||
val := vf.MapIndex(key)
|
||||
discardLegacy(val.Interface().(Message))
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Interface:
|
||||
// Must be oneof field.
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
|
||||
default: // E.g., test_proto.isCommunique_Union interface
|
||||
if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
|
||||
vf = vf.Elem() // E.g., *test_proto.Communique_Msg
|
||||
if !vf.IsNil() {
|
||||
vf = vf.Elem() // E.g., test_proto.Communique_Msg
|
||||
vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
|
||||
if vf.Kind() == reflect.Ptr {
|
||||
discardLegacy(vf.Interface().(Message))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
|
||||
if vf.Type() != reflect.TypeOf([]byte{}) {
|
||||
panic("expected XXX_unrecognized to be of type []byte")
|
||||
}
|
||||
vf.Set(reflect.ValueOf([]byte(nil)))
|
||||
}
|
||||
|
||||
// For proto2 messages, only discard unknown fields in message extensions
|
||||
// that have been accessed via GetExtension.
|
||||
if em, err := extendable(m); err == nil {
|
||||
// Ignore lock since discardLegacy is not concurrency safe.
|
||||
emm, _ := em.extensionsRead()
|
||||
for _, mx := range emm {
|
||||
if m, ok := mx.value.(Message); ok {
|
||||
discardLegacy(m)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -0,0 +1,203 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Routines for encoding data into the wire format for protocol buffers.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
var (
|
||||
// errRepeatedHasNil is the error returned if Marshal is called with
|
||||
// a struct with a repeated field containing a nil element.
|
||||
errRepeatedHasNil = errors.New("proto: repeated field has nil element")
|
||||
|
||||
// errOneofHasNil is the error returned if Marshal is called with
|
||||
// a struct with a oneof field containing a nil element.
|
||||
errOneofHasNil = errors.New("proto: oneof field has nil value")
|
||||
|
||||
// ErrNil is the error returned if Marshal is called with nil.
|
||||
ErrNil = errors.New("proto: Marshal called with nil")
|
||||
|
||||
// ErrTooLarge is the error returned if Marshal is called with a
|
||||
// message that encodes to >2GB.
|
||||
ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
|
||||
)
|
||||
|
||||
// The fundamental encoders that put bytes on the wire.
|
||||
// Those that take integer types all accept uint64 and are
|
||||
// therefore of type valueEncoder.
|
||||
|
||||
const maxVarintBytes = 10 // maximum length of a varint
|
||||
|
||||
// EncodeVarint returns the varint encoding of x.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
// Not used by the package itself, but helpful to clients
|
||||
// wishing to use the same encoding.
|
||||
func EncodeVarint(x uint64) []byte {
|
||||
var buf [maxVarintBytes]byte
|
||||
var n int
|
||||
for n = 0; x > 127; n++ {
|
||||
buf[n] = 0x80 | uint8(x&0x7F)
|
||||
x >>= 7
|
||||
}
|
||||
buf[n] = uint8(x)
|
||||
n++
|
||||
return buf[0:n]
|
||||
}
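
A worked example of the encoding: 300 is 0b1_0010_1100, so the low seven bits (0x2C) are emitted first with the continuation bit set, followed by 0x02. From a client's point of view:

	b := proto.EncodeVarint(300)
	fmt.Printf("% x\n", b)             // ac 02
	fmt.Println(proto.SizeVarint(300)) // 2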
|
||||
|
||||
// EncodeVarint writes a varint-encoded integer to the Buffer.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func (p *Buffer) EncodeVarint(x uint64) error {
|
||||
for x >= 1<<7 {
|
||||
p.buf = append(p.buf, uint8(x&0x7f|0x80))
|
||||
x >>= 7
|
||||
}
|
||||
p.buf = append(p.buf, uint8(x))
|
||||
return nil
|
||||
}
|
||||
|
||||
// SizeVarint returns the varint encoding size of an integer.
|
||||
func SizeVarint(x uint64) int {
|
||||
switch {
|
||||
case x < 1<<7:
|
||||
return 1
|
||||
case x < 1<<14:
|
||||
return 2
|
||||
case x < 1<<21:
|
||||
return 3
|
||||
case x < 1<<28:
|
||||
return 4
|
||||
case x < 1<<35:
|
||||
return 5
|
||||
case x < 1<<42:
|
||||
return 6
|
||||
case x < 1<<49:
|
||||
return 7
|
||||
case x < 1<<56:
|
||||
return 8
|
||||
case x < 1<<63:
|
||||
return 9
|
||||
}
|
||||
return 10
|
||||
}
|
||||
|
||||
// EncodeFixed64 writes a 64-bit integer to the Buffer.
|
||||
// This is the format for the
|
||||
// fixed64, sfixed64, and double protocol buffer types.
|
||||
func (p *Buffer) EncodeFixed64(x uint64) error {
|
||||
p.buf = append(p.buf,
|
||||
uint8(x),
|
||||
uint8(x>>8),
|
||||
uint8(x>>16),
|
||||
uint8(x>>24),
|
||||
uint8(x>>32),
|
||||
uint8(x>>40),
|
||||
uint8(x>>48),
|
||||
uint8(x>>56))
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeFixed32 writes a 32-bit integer to the Buffer.
|
||||
// This is the format for the
|
||||
// fixed32, sfixed32, and float protocol buffer types.
|
||||
func (p *Buffer) EncodeFixed32(x uint64) error {
|
||||
p.buf = append(p.buf,
|
||||
uint8(x),
|
||||
uint8(x>>8),
|
||||
uint8(x>>16),
|
||||
uint8(x>>24))
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
|
||||
// to the Buffer.
|
||||
// This is the format used for the sint64 protocol buffer type.
|
||||
func (p *Buffer) EncodeZigzag64(x uint64) error {
|
||||
// use signed number to get arithmetic right shift.
|
||||
return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
|
||||
// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
|
||||
// to the Buffer.
|
||||
// This is the format used for the sint32 protocol buffer type.
|
||||
func (p *Buffer) EncodeZigzag32(x uint64) error {
|
||||
// use signed number to get arithmetic right shift.
|
||||
return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
|
||||
}
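
Zigzag interleaves signed values as 0->0, -1->1, 1->2, -2->3, 2->4, so small-magnitude negative sint values still encode as short varints. A small sketch from a client's point of view:

	p := proto.NewBuffer(nil)
	p.EncodeZigzag64(uint64(int64(-1))) // zigzag(-1) = 1, written as the single byte 0x01
	p.EncodeZigzag64(2)                 // zigzag(2) = 4
	fmt.Printf("% x\n", p.Bytes())      // 01 04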
|
||||
|
||||
// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
|
||||
// This is the format used for the bytes protocol buffer
|
||||
// type and for embedded messages.
|
||||
func (p *Buffer) EncodeRawBytes(b []byte) error {
|
||||
p.EncodeVarint(uint64(len(b)))
|
||||
p.buf = append(p.buf, b...)
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeStringBytes writes an encoded string to the Buffer.
|
||||
// This is the format used for the proto2 string type.
|
||||
func (p *Buffer) EncodeStringBytes(s string) error {
|
||||
p.EncodeVarint(uint64(len(s)))
|
||||
p.buf = append(p.buf, s...)
|
||||
return nil
|
||||
}
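
Both EncodeRawBytes and EncodeStringBytes write the length as a varint followed by the raw bytes, which is the shared wire form of bytes, string, and embedded-message fields. For example:

	p := proto.NewBuffer(nil)
	p.EncodeStringBytes("hi")
	fmt.Printf("% x\n", p.Bytes()) // 02 68 69 (length 2, then 'h' and 'i')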
|
||||
|
||||
// Marshaler is the interface representing objects that can marshal themselves.
|
||||
type Marshaler interface {
|
||||
Marshal() ([]byte, error)
|
||||
}
|
||||
|
||||
// EncodeMessage writes the protocol buffer to the Buffer,
|
||||
// prefixed by a varint-encoded length.
|
||||
func (p *Buffer) EncodeMessage(pb Message) error {
|
||||
siz := Size(pb)
|
||||
p.EncodeVarint(uint64(siz))
|
||||
return p.Marshal(pb)
|
||||
}
|
||||
|
||||
// All protocol buffer fields are nillable, but be careful.
|
||||
func isNil(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
return v.IsNil()
|
||||
}
|
||||
return false
|
||||
}
|
@ -0,0 +1,301 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Protocol buffer comparison.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"log"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
/*
|
||||
Equal returns true iff protocol buffers a and b are equal.
|
||||
The arguments must both be pointers to protocol buffer structs.
|
||||
|
||||
Equality is defined in this way:
|
||||
- Two messages are equal iff they are the same type,
|
||||
corresponding fields are equal, unknown field sets
|
||||
are equal, and extensions sets are equal.
|
||||
- Two set scalar fields are equal iff their values are equal.
|
||||
If the fields are of a floating-point type, remember that
|
||||
NaN != x for all x, including NaN. If the message is defined
|
||||
in a proto3 .proto file, fields are not "set"; specifically,
|
||||
zero length proto3 "bytes" fields are equal (nil == {}).
|
||||
- Two repeated fields are equal iff their lengths are the same,
|
||||
and their corresponding elements are equal. Note a "bytes" field,
|
||||
although represented by []byte, is not a repeated field and the
|
||||
rule for the scalar fields described above applies.
|
||||
- Two unset fields are equal.
|
||||
- Two unknown field sets are equal if their current
|
||||
encoded state is equal.
|
||||
- Two extension sets are equal iff they have corresponding
|
||||
elements that are pairwise equal.
|
||||
- Two map fields are equal iff their lengths are the same,
|
||||
and they contain the same set of elements. Zero-length map
|
||||
fields are equal.
|
||||
- Every other combination of things is not equal.
|
||||
|
||||
The return value is undefined if a and b are not protocol buffers.
|
||||
*/
|
||||
func Equal(a, b Message) bool {
|
||||
if a == nil || b == nil {
|
||||
return a == b
|
||||
}
|
||||
v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
|
||||
if v1.Type() != v2.Type() {
|
||||
return false
|
||||
}
|
||||
if v1.Kind() == reflect.Ptr {
|
||||
if v1.IsNil() {
|
||||
return v2.IsNil()
|
||||
}
|
||||
if v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
v1, v2 = v1.Elem(), v2.Elem()
|
||||
}
|
||||
if v1.Kind() != reflect.Struct {
|
||||
return false
|
||||
}
|
||||
return equalStruct(v1, v2)
|
||||
}
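
A small sketch of Equal, assuming a hypothetical generated proto2 type mypb.Test with an optional string field Label:

	a := &mypb.Test{Label: proto.String("x")}
	b := &mypb.Test{Label: proto.String("x")}
	fmt.Println(proto.Equal(a, b)) // true: same type, corresponding fields equal
	b.Label = nil
	fmt.Println(proto.Equal(a, b)) // false: set/unset mismatch on Label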
|
||||
|
||||
// v1 and v2 are known to have the same type.
|
||||
func equalStruct(v1, v2 reflect.Value) bool {
|
||||
sprop := GetProperties(v1.Type())
|
||||
for i := 0; i < v1.NumField(); i++ {
|
||||
f := v1.Type().Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
f1, f2 := v1.Field(i), v2.Field(i)
|
||||
if f.Type.Kind() == reflect.Ptr {
|
||||
if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
|
||||
// both unset
|
||||
continue
|
||||
} else if n1 != n2 {
|
||||
// set/unset mismatch
|
||||
return false
|
||||
}
|
||||
f1, f2 = f1.Elem(), f2.Elem()
|
||||
}
|
||||
if !equalAny(f1, f2, sprop.Prop[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
|
||||
em2 := v2.FieldByName("XXX_InternalExtensions")
|
||||
if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
|
||||
em2 := v2.FieldByName("XXX_extensions")
|
||||
if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
uf := v1.FieldByName("XXX_unrecognized")
|
||||
if !uf.IsValid() {
|
||||
return true
|
||||
}
|
||||
|
||||
u1 := uf.Bytes()
|
||||
u2 := v2.FieldByName("XXX_unrecognized").Bytes()
|
||||
return bytes.Equal(u1, u2)
|
||||
}
|
||||
|
||||
// v1 and v2 are known to have the same type.
|
||||
// prop may be nil.
|
||||
func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
|
||||
if v1.Type() == protoMessageType {
|
||||
m1, _ := v1.Interface().(Message)
|
||||
m2, _ := v2.Interface().(Message)
|
||||
return Equal(m1, m2)
|
||||
}
|
||||
switch v1.Kind() {
|
||||
case reflect.Bool:
|
||||
return v1.Bool() == v2.Bool()
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v1.Float() == v2.Float()
|
||||
case reflect.Int32, reflect.Int64:
|
||||
return v1.Int() == v2.Int()
|
||||
case reflect.Interface:
|
||||
// Probably a oneof field; compare the inner values.
|
||||
n1, n2 := v1.IsNil(), v2.IsNil()
|
||||
if n1 || n2 {
|
||||
return n1 == n2
|
||||
}
|
||||
e1, e2 := v1.Elem(), v2.Elem()
|
||||
if e1.Type() != e2.Type() {
|
||||
return false
|
||||
}
|
||||
return equalAny(e1, e2, nil)
|
||||
case reflect.Map:
|
||||
if v1.Len() != v2.Len() {
|
||||
return false
|
||||
}
|
||||
for _, key := range v1.MapKeys() {
|
||||
val2 := v2.MapIndex(key)
|
||||
if !val2.IsValid() {
|
||||
// This key was not found in the second map.
|
||||
return false
|
||||
}
|
||||
if !equalAny(v1.MapIndex(key), val2, nil) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.Ptr:
|
||||
// Maps may have nil values in them, so check for nil.
|
||||
if v1.IsNil() && v2.IsNil() {
|
||||
return true
|
||||
}
|
||||
if v1.IsNil() != v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
return equalAny(v1.Elem(), v2.Elem(), prop)
|
||||
case reflect.Slice:
|
||||
if v1.Type().Elem().Kind() == reflect.Uint8 {
|
||||
// short circuit: []byte
|
||||
|
||||
// Edge case: if this is in a proto3 message, a zero length
|
||||
// bytes field is considered the zero value.
|
||||
if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
|
||||
return true
|
||||
}
|
||||
if v1.IsNil() != v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
|
||||
}
|
||||
|
||||
if v1.Len() != v2.Len() {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < v1.Len(); i++ {
|
||||
if !equalAny(v1.Index(i), v2.Index(i), prop) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.String:
|
||||
return v1.Interface().(string) == v2.Interface().(string)
|
||||
case reflect.Struct:
|
||||
return equalStruct(v1, v2)
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
return v1.Uint() == v2.Uint()
|
||||
}
|
||||
|
||||
// unknown type, so not a protocol buffer
|
||||
log.Printf("proto: don't know how to compare %v", v1)
|
||||
return false
|
||||
}
|
||||
|
||||
// base is the struct type that the extensions are based on.
|
||||
// x1 and x2 are InternalExtensions.
|
||||
func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
|
||||
em1, _ := x1.extensionsRead()
|
||||
em2, _ := x2.extensionsRead()
|
||||
return equalExtMap(base, em1, em2)
|
||||
}
|
||||
|
||||
func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
|
||||
if len(em1) != len(em2) {
|
||||
return false
|
||||
}
|
||||
|
||||
for extNum, e1 := range em1 {
|
||||
e2, ok := em2[extNum]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
m1 := extensionAsLegacyType(e1.value)
|
||||
m2 := extensionAsLegacyType(e2.value)
|
||||
|
||||
if m1 == nil && m2 == nil {
|
||||
// Both have only encoded form.
|
||||
if bytes.Equal(e1.enc, e2.enc) {
|
||||
continue
|
||||
}
|
||||
// The bytes are different, but the extensions might still be
|
||||
// equal. We need to decode them to compare.
|
||||
}
|
||||
|
||||
if m1 != nil && m2 != nil {
|
||||
// Both are unencoded.
|
||||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
|
||||
return false
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// At least one is encoded. To do a semantically correct comparison
|
||||
// we need to unmarshal them first.
|
||||
var desc *ExtensionDesc
|
||||
if m := extensionMaps[base]; m != nil {
|
||||
desc = m[extNum]
|
||||
}
|
||||
if desc == nil {
|
||||
// If both have only encoded form and the bytes are the same,
|
||||
// it is handled above. We get here when the bytes are different.
|
||||
// We don't know how to decode it, so just compare them as byte
|
||||
// slices.
|
||||
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
|
||||
return false
|
||||
}
|
||||
var err error
|
||||
if m1 == nil {
|
||||
m1, err = decodeExtension(e1.enc, desc)
|
||||
}
|
||||
if m2 == nil && err == nil {
|
||||
m2, err = decodeExtension(e2.enc, desc)
|
||||
}
|
||||
if err != nil {
|
||||
// The encoded form is invalid.
|
||||
log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
|
||||
return false
|
||||
}
|
||||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
@ -0,0 +1,607 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Types and routines for supporting protocol buffer extensions.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
|
||||
var ErrMissingExtension = errors.New("proto: missing extension")
|
||||
|
||||
// ExtensionRange represents a range of message extensions for a protocol buffer.
|
||||
// Used in code generated by the protocol compiler.
|
||||
type ExtensionRange struct {
|
||||
Start, End int32 // both inclusive
|
||||
}
|
||||
|
||||
// extendableProto is an interface implemented by any protocol buffer generated by the current
|
||||
// proto compiler that may be extended.
|
||||
type extendableProto interface {
|
||||
Message
|
||||
ExtensionRangeArray() []ExtensionRange
|
||||
extensionsWrite() map[int32]Extension
|
||||
extensionsRead() (map[int32]Extension, sync.Locker)
|
||||
}
|
||||
|
||||
// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
|
||||
// version of the proto compiler that may be extended.
|
||||
type extendableProtoV1 interface {
|
||||
Message
|
||||
ExtensionRangeArray() []ExtensionRange
|
||||
ExtensionMap() map[int32]Extension
|
||||
}
|
||||
|
||||
// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
|
||||
type extensionAdapter struct {
|
||||
extendableProtoV1
|
||||
}
|
||||
|
||||
func (e extensionAdapter) extensionsWrite() map[int32]Extension {
|
||||
return e.ExtensionMap()
|
||||
}
|
||||
|
||||
func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
|
||||
return e.ExtensionMap(), notLocker{}
|
||||
}
|
||||
|
||||
// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
|
||||
type notLocker struct{}
|
||||
|
||||
func (n notLocker) Lock() {}
|
||||
func (n notLocker) Unlock() {}
|
||||
|
||||
// extendable returns the extendableProto interface for the given generated proto message.
|
||||
// If the proto message has the old extension format, it returns a wrapper that implements
|
||||
// the extendableProto interface.
|
||||
func extendable(p interface{}) (extendableProto, error) {
|
||||
switch p := p.(type) {
|
||||
case extendableProto:
|
||||
if isNilPtr(p) {
|
||||
return nil, fmt.Errorf("proto: nil %T is not extendable", p)
|
||||
}
|
||||
return p, nil
|
||||
case extendableProtoV1:
|
||||
if isNilPtr(p) {
|
||||
return nil, fmt.Errorf("proto: nil %T is not extendable", p)
|
||||
}
|
||||
return extensionAdapter{p}, nil
|
||||
}
|
||||
// Don't allocate a specific error containing %T:
|
||||
// this is the hot path for Clone and MarshalText.
|
||||
return nil, errNotExtendable
|
||||
}
|
||||
|
||||
var errNotExtendable = errors.New("proto: not an extendable proto.Message")
|
||||
|
||||
func isNilPtr(x interface{}) bool {
|
||||
v := reflect.ValueOf(x)
|
||||
return v.Kind() == reflect.Ptr && v.IsNil()
|
||||
}
|
||||
|
||||
// XXX_InternalExtensions is an internal representation of proto extensions.
|
||||
//
|
||||
// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
|
||||
// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
|
||||
//
|
||||
// The methods of XXX_InternalExtensions are not concurrency safe in general,
|
||||
// but calls to logically read-only methods such as has and get may be executed concurrently.
|
||||
type XXX_InternalExtensions struct {
|
||||
// The struct must be indirect so that if a user inadvertently copies a
|
||||
// generated message and its embedded XXX_InternalExtensions, they
|
||||
// avoid the mayhem of a copied mutex.
|
||||
//
|
||||
// The mutex serializes all logically read-only operations to p.extensionMap.
|
||||
// It is up to the client to ensure that write operations to p.extensionMap are
|
||||
// mutually exclusive with other accesses.
|
||||
p *struct {
|
||||
mu sync.Mutex
|
||||
extensionMap map[int32]Extension
|
||||
}
|
||||
}
|
||||
|
||||
// extensionsWrite returns the extension map, creating it on first use.
|
||||
func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
|
||||
if e.p == nil {
|
||||
e.p = new(struct {
|
||||
mu sync.Mutex
|
||||
extensionMap map[int32]Extension
|
||||
})
|
||||
e.p.extensionMap = make(map[int32]Extension)
|
||||
}
|
||||
return e.p.extensionMap
|
||||
}
|
||||
|
||||
// extensionsRead returns the extensions map for read-only use. It may be nil.
|
||||
// The caller must hold the returned mutex's lock when accessing elements within the map.
|
||||
func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
|
||||
if e.p == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return e.p.extensionMap, &e.p.mu
|
||||
}
|
||||
|
||||
// ExtensionDesc represents an extension specification.
|
||||
// Used in generated code from the protocol compiler.
|
||||
type ExtensionDesc struct {
|
||||
ExtendedType Message // nil pointer to the type that is being extended
|
||||
ExtensionType interface{} // nil pointer to the extension type
|
||||
Field int32 // field number
|
||||
Name string // fully-qualified name of extension, for text formatting
|
||||
Tag string // protobuf tag style
|
||||
Filename string // name of the file in which the extension is defined
|
||||
}
|
||||
|
||||
func (ed *ExtensionDesc) repeated() bool {
|
||||
t := reflect.TypeOf(ed.ExtensionType)
|
||||
return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
|
||||
}
|
||||
|
||||
// Extension represents an extension in a message.
|
||||
type Extension struct {
|
||||
// When an extension is stored in a message using SetExtension
|
||||
// only desc and value are set. When the message is marshaled
|
||||
// enc will be set to the encoded form of the message.
|
||||
//
|
||||
// When a message is unmarshaled and contains extensions, each
|
||||
// extension will have only enc set. When such an extension is
|
||||
// accessed using GetExtension (or GetExtensions) desc and value
|
||||
// will be set.
|
||||
desc *ExtensionDesc
|
||||
|
||||
// value is a concrete value for the extension field. Let the type of
|
||||
// desc.ExtensionType be the "API type" and the type of Extension.value
|
||||
// be the "storage type". The API type and storage type are the same except:
|
||||
// * For scalars (except []byte), the API type uses *T,
|
||||
// while the storage type uses T.
|
||||
// * For repeated fields, the API type uses []T, while the storage type
|
||||
// uses *[]T.
|
||||
//
|
||||
// The reason for the divergence is so that the storage type more naturally
|
||||
// matches what is expected when retrieving the values through the
|
||||
// protobuf reflection APIs.
|
||||
//
|
||||
// The value may only be populated if desc is also populated.
|
||||
value interface{}
|
||||
|
||||
// enc is the raw bytes for the extension field.
|
||||
enc []byte
|
||||
}
|
||||
|
||||
// SetRawExtension is for testing only.
|
||||
func SetRawExtension(base Message, id int32, b []byte) {
|
||||
epb, err := extendable(base)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
extmap := epb.extensionsWrite()
|
||||
extmap[id] = Extension{enc: b}
|
||||
}
|
||||
|
||||
// isExtensionField returns true iff the given field number is in an extension range.
|
||||
func isExtensionField(pb extendableProto, field int32) bool {
|
||||
for _, er := range pb.ExtensionRangeArray() {
|
||||
if er.Start <= field && field <= er.End {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// checkExtensionTypes checks that the given extension is valid for pb.
|
||||
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
|
||||
var pbi interface{} = pb
|
||||
// Check the extended type.
|
||||
if ea, ok := pbi.(extensionAdapter); ok {
|
||||
pbi = ea.extendableProtoV1
|
||||
}
|
||||
if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
|
||||
return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
|
||||
}
|
||||
// Check the range.
|
||||
if !isExtensionField(pb, extension.Field) {
|
||||
return errors.New("proto: bad extension number; not in declared ranges")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// extPropKey is sufficient to uniquely identify an extension.
|
||||
type extPropKey struct {
|
||||
base reflect.Type
|
||||
field int32
|
||||
}
|
||||
|
||||
var extProp = struct {
|
||||
sync.RWMutex
|
||||
m map[extPropKey]*Properties
|
||||
}{
|
||||
m: make(map[extPropKey]*Properties),
|
||||
}
|
||||
|
||||
func extensionProperties(ed *ExtensionDesc) *Properties {
|
||||
key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
|
||||
|
||||
extProp.RLock()
|
||||
if prop, ok := extProp.m[key]; ok {
|
||||
extProp.RUnlock()
|
||||
return prop
|
||||
}
|
||||
extProp.RUnlock()
|
||||
|
||||
extProp.Lock()
|
||||
defer extProp.Unlock()
|
||||
// Check again.
|
||||
if prop, ok := extProp.m[key]; ok {
|
||||
return prop
|
||||
}
|
||||
|
||||
prop := new(Properties)
|
||||
prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
|
||||
extProp.m[key] = prop
|
||||
return prop
|
||||
}
|
||||
|
||||
// HasExtension returns whether the given extension is present in pb.
|
||||
func HasExtension(pb Message, extension *ExtensionDesc) bool {
|
||||
// TODO: Check types, field numbers, etc.?
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
extmap, mu := epb.extensionsRead()
|
||||
if extmap == nil {
|
||||
return false
|
||||
}
|
||||
mu.Lock()
|
||||
_, ok := extmap[extension.Field]
|
||||
mu.Unlock()
|
||||
return ok
|
||||
}
|
||||
|
||||
// ClearExtension removes the given extension from pb.
|
||||
func ClearExtension(pb Message, extension *ExtensionDesc) {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// TODO: Check types, field numbers, etc.?
|
||||
extmap := epb.extensionsWrite()
|
||||
delete(extmap, extension.Field)
|
||||
}
|
||||
|
||||
// GetExtension retrieves a proto2 extended field from pb.
|
||||
//
|
||||
// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
|
||||
// then GetExtension parses the encoded field and returns a Go value of the specified type.
|
||||
// If the field is not present, then the default value is returned (if one is specified),
|
||||
// otherwise ErrMissingExtension is reported.
|
||||
//
|
||||
// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
|
||||
// then GetExtension returns the raw encoded bytes of the field extension.
|
||||
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if extension.ExtendedType != nil {
|
||||
// can only check type if this is a complete descriptor
|
||||
if err := checkExtensionTypes(epb, extension); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
emap, mu := epb.extensionsRead()
|
||||
if emap == nil {
|
||||
return defaultExtensionValue(extension)
|
||||
}
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
e, ok := emap[extension.Field]
|
||||
if !ok {
|
||||
// defaultExtensionValue returns the default value or
|
||||
// ErrMissingExtension if there is no default.
|
||||
return defaultExtensionValue(extension)
|
||||
}
|
||||
|
||||
if e.value != nil {
|
||||
// Already decoded. Check the descriptor, though.
|
||||
if e.desc != extension {
|
||||
// This shouldn't happen. If it does, it means that
|
||||
// GetExtension was called twice with two different
|
||||
// descriptors with the same field number.
|
||||
return nil, errors.New("proto: descriptor conflict")
|
||||
}
|
||||
return extensionAsLegacyType(e.value), nil
|
||||
}
|
||||
|
||||
if extension.ExtensionType == nil {
|
||||
// incomplete descriptor
|
||||
return e.enc, nil
|
||||
}
|
||||
|
||||
v, err := decodeExtension(e.enc, extension)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Remember the decoded version and drop the encoded version.
|
||||
// That way it is safe to mutate what we return.
|
||||
e.value = extensionAsStorageType(v)
|
||||
e.desc = extension
|
||||
e.enc = nil
|
||||
emap[extension.Field] = e
|
||||
return extensionAsLegacyType(e.value), nil
|
||||
}
|
||||
|
||||
// defaultExtensionValue returns the default value for extension.
|
||||
// If no default for an extension is defined ErrMissingExtension is returned.
|
||||
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
|
||||
if extension.ExtensionType == nil {
|
||||
// incomplete descriptor, so no default
|
||||
return nil, ErrMissingExtension
|
||||
}
|
||||
|
||||
t := reflect.TypeOf(extension.ExtensionType)
|
||||
props := extensionProperties(extension)
|
||||
|
||||
sf, _, err := fieldDefault(t, props)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if sf == nil || sf.value == nil {
|
||||
// There is no default value.
|
||||
return nil, ErrMissingExtension
|
||||
}
|
||||
|
||||
if t.Kind() != reflect.Ptr {
|
||||
// We do not need to return a Ptr, we can directly return sf.value.
|
||||
return sf.value, nil
|
||||
}
|
||||
|
||||
// We need to return an interface{} that is a pointer to sf.value.
|
||||
value := reflect.New(t).Elem()
|
||||
value.Set(reflect.New(value.Type().Elem()))
|
||||
if sf.kind == reflect.Int32 {
|
||||
// We may have an int32 or an enum, but the underlying data is int32.
|
||||
// Since we can't set an int32 into a non-int32 reflect.Value directly,
|
||||
// set it as an int32.
|
||||
value.Elem().SetInt(int64(sf.value.(int32)))
|
||||
} else {
|
||||
value.Elem().Set(reflect.ValueOf(sf.value))
|
||||
}
|
||||
return value.Interface(), nil
|
||||
}
|
||||
|
||||
// decodeExtension decodes an extension encoded in b.
|
||||
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
|
||||
t := reflect.TypeOf(extension.ExtensionType)
|
||||
unmarshal := typeUnmarshaler(t, extension.Tag)
|
||||
|
||||
// t is a pointer to a struct, pointer to basic type or a slice.
|
||||
// Allocate space to store the pointer/slice.
|
||||
value := reflect.New(t).Elem()
|
||||
|
||||
var err error
|
||||
for {
|
||||
x, n := decodeVarint(b)
|
||||
if n == 0 {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
b = b[n:]
|
||||
wire := int(x) & 7
|
||||
|
||||
b, err = unmarshal(b, valToPointer(value.Addr()), wire)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(b) == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return value.Interface(), nil
|
||||
}
|
||||
|
||||
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
|
||||
// The returned slice has the same length as es; missing extensions will appear as nil elements.
|
||||
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
extensions = make([]interface{}, len(es))
|
||||
for i, e := range es {
|
||||
extensions[i], err = GetExtension(epb, e)
|
||||
if err == ErrMissingExtension {
|
||||
err = nil
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
|
||||
// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
|
||||
// just the Field field, which defines the extension's field number.
|
||||
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
registeredExtensions := RegisteredExtensions(pb)
|
||||
|
||||
emap, mu := epb.extensionsRead()
|
||||
if emap == nil {
|
||||
return nil, nil
|
||||
}
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
extensions := make([]*ExtensionDesc, 0, len(emap))
|
||||
for extid, e := range emap {
|
||||
desc := e.desc
|
||||
if desc == nil {
|
||||
desc = registeredExtensions[extid]
|
||||
if desc == nil {
|
||||
desc = &ExtensionDesc{Field: extid}
|
||||
}
|
||||
}
|
||||
|
||||
extensions = append(extensions, desc)
|
||||
}
|
||||
return extensions, nil
|
||||
}
|
||||
|
||||
// SetExtension sets the specified extension of pb to the specified value.
|
||||
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := checkExtensionTypes(epb, extension); err != nil {
|
||||
return err
|
||||
}
|
||||
typ := reflect.TypeOf(extension.ExtensionType)
|
||||
if typ != reflect.TypeOf(value) {
|
||||
return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
|
||||
}
|
||||
// nil extension values need to be caught early, because the
|
||||
// encoder can't distinguish an ErrNil due to a nil extension
|
||||
// from an ErrNil due to a missing field. Extensions are
|
||||
// always optional, so the encoder would just swallow the error
|
||||
// and drop all the extensions from the encoded message.
|
||||
if reflect.ValueOf(value).IsNil() {
|
||||
return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
|
||||
}
|
||||
|
||||
extmap := epb.extensionsWrite()
|
||||
extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)}
|
||||
return nil
|
||||
}
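
A usage sketch for the extension accessors, assuming a hypothetical extendable message mypb.Base and a registered optional string extension descriptor mypb.E_Tag:

	msg := &mypb.Base{}
	if err := proto.SetExtension(msg, mypb.E_Tag, proto.String("v1")); err != nil {
		log.Fatal(err)
	}
	fmt.Println(proto.HasExtension(msg, mypb.E_Tag)) // true
	v, err := proto.GetExtension(msg, mypb.E_Tag)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*v.(*string)) // v1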
|
||||
|
||||
// ClearAllExtensions clears all extensions from pb.
|
||||
func ClearAllExtensions(pb Message) {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
m := epb.extensionsWrite()
|
||||
for k := range m {
|
||||
delete(m, k)
|
||||
}
|
||||
}
|
||||
|
||||
// A global registry of extensions.
|
||||
// The generated code will register the generated descriptors by calling RegisterExtension.
|
||||
|
||||
var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
|
||||
|
||||
// RegisterExtension is called from the generated code.
|
||||
func RegisterExtension(desc *ExtensionDesc) {
|
||||
st := reflect.TypeOf(desc.ExtendedType).Elem()
|
||||
m := extensionMaps[st]
|
||||
if m == nil {
|
||||
m = make(map[int32]*ExtensionDesc)
|
||||
extensionMaps[st] = m
|
||||
}
|
||||
if _, ok := m[desc.Field]; ok {
|
||||
panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
|
||||
}
|
||||
m[desc.Field] = desc
|
||||
}
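
Generated .pb.go files typically define an ExtensionDesc and register it from an init function, which is what populates the map consulted by RegisteredExtensions and ExtensionDescs. A sketch with a hypothetical string extension of a hypothetical Base message:

	var E_Tag = &proto.ExtensionDesc{
		ExtendedType:  (*Base)(nil),   // hypothetical extendable message type
		ExtensionType: (*string)(nil), // optional string extension
		Field:         100,            // assumed to lie in Base's extension range
		Name:          "example.tag",
		Tag:           "bytes,100,opt,name=tag",
	}

	func init() { proto.RegisterExtension(E_Tag) }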
|
||||
|
||||
// RegisteredExtensions returns a map of the registered extensions of a
|
||||
// protocol buffer struct, indexed by the extension number.
|
||||
// The argument pb should be a nil pointer to the struct type.
|
||||
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
|
||||
return extensionMaps[reflect.TypeOf(pb).Elem()]
|
||||
}
|
||||
|
||||
// extensionAsLegacyType converts a value in the storage type to the API type.
|
||||
// See Extension.value.
|
||||
func extensionAsLegacyType(v interface{}) interface{} {
|
||||
switch rv := reflect.ValueOf(v); rv.Kind() {
|
||||
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
|
||||
// Represent primitive types as a pointer to the value.
|
||||
rv2 := reflect.New(rv.Type())
|
||||
rv2.Elem().Set(rv)
|
||||
v = rv2.Interface()
|
||||
case reflect.Ptr:
|
||||
// Represent slice types as the value itself.
|
||||
switch rv.Type().Elem().Kind() {
|
||||
case reflect.Slice:
|
||||
if rv.IsNil() {
|
||||
v = reflect.Zero(rv.Type().Elem()).Interface()
|
||||
} else {
|
||||
v = rv.Elem().Interface()
|
||||
}
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// extensionAsStorageType converts a value in the API type to the storage type.
|
||||
// See Extension.value.
|
||||
func extensionAsStorageType(v interface{}) interface{} {
|
||||
switch rv := reflect.ValueOf(v); rv.Kind() {
|
||||
case reflect.Ptr:
|
||||
// Represent pointer-to-scalar types as the value itself.
|
||||
switch rv.Type().Elem().Kind() {
|
||||
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
|
||||
if rv.IsNil() {
|
||||
v = reflect.Zero(rv.Type().Elem()).Interface()
|
||||
} else {
|
||||
v = rv.Elem().Interface()
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
// Represent slice types as a pointer to the value.
|
||||
if rv.Type().Elem().Kind() != reflect.Uint8 {
|
||||
rv2 := reflect.New(rv.Type())
|
||||
rv2.Elem().Set(rv)
|
||||
v = rv2.Interface()
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
@ -0,0 +1,965 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
/*
|
||||
Package proto converts data structures to and from the wire format of
|
||||
protocol buffers. It works in concert with the Go source code generated
|
||||
for .proto files by the protocol compiler.
|
||||
|
||||
A summary of the properties of the protocol buffer interface
|
||||
for a protocol buffer variable v:
|
||||
|
||||
- Names are turned from camel_case to CamelCase for export.
|
||||
- There are no methods on v to set fields; just treat
|
||||
them as structure fields.
|
||||
- There are getters that return a field's value if set,
|
||||
and return the field's default value if unset.
|
||||
The getters work even if the receiver is a nil message.
|
||||
- The zero value for a struct is its correct initialization state.
|
||||
All desired fields must be set before marshaling.
|
||||
- A Reset() method will restore a protobuf struct to its zero state.
|
||||
- Non-repeated fields are pointers to the values; nil means unset.
|
||||
That is, optional or required field int32 f becomes F *int32.
|
||||
- Repeated fields are slices.
|
||||
- Helper functions are available to aid the setting of fields.
|
||||
msg.Foo = proto.String("hello") // set field
|
||||
- Constants are defined to hold the default values of all fields that
|
||||
have them. They have the form Default_StructName_FieldName.
|
||||
Because the getter methods handle defaulted values,
|
||||
direct use of these constants should be rare.
|
||||
- Enums are given type names and maps from names to values.
|
||||
Enum values are prefixed by the enclosing message's name, or by the
|
||||
enum's type name if it is a top-level enum. Enum types have a String
|
||||
method, and an Enum method to assist in message construction.
|
||||
- Nested messages, groups and enums have type names prefixed with the name of
|
||||
the surrounding message type.
|
||||
- Extensions are given descriptor names that start with E_,
|
||||
followed by an underscore-delimited list of the nested messages
|
||||
that contain it (if any) followed by the CamelCased name of the
|
||||
extension field itself. HasExtension, ClearExtension, GetExtension
|
||||
and SetExtension are functions for manipulating extensions.
|
||||
- Oneof field sets are given a single field in their message,
|
||||
with distinguished wrapper types for each possible field value.
|
||||
- Marshal and Unmarshal are functions to encode and decode the wire format.
|
||||
|
||||
When the .proto file specifies `syntax="proto3"`, there are some differences:
|
||||
|
||||
- Non-repeated fields of non-message type are values instead of pointers.
|
||||
- Enum types do not get an Enum method.
|
||||
|
||||
The simplest way to describe this is to see an example.
|
||||
Given file test.proto, containing
|
||||
|
||||
package example;
|
||||
|
||||
enum FOO { X = 17; }
|
||||
|
||||
message Test {
|
||||
required string label = 1;
|
||||
optional int32 type = 2 [default=77];
|
||||
repeated int64 reps = 3;
|
||||
optional group OptionalGroup = 4 {
|
||||
required string RequiredField = 5;
|
||||
}
|
||||
oneof union {
|
||||
int32 number = 6;
|
||||
string name = 7;
|
||||
}
|
||||
}
|
||||
|
||||
The resulting file, test.pb.go, is:
|
||||
|
||||
package example
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import math "math"
|
||||
|
||||
type FOO int32
|
||||
const (
|
||||
FOO_X FOO = 17
|
||||
)
|
||||
var FOO_name = map[int32]string{
|
||||
17: "X",
|
||||
}
|
||||
var FOO_value = map[string]int32{
|
||||
"X": 17,
|
||||
}
|
||||
|
||||
func (x FOO) Enum() *FOO {
|
||||
p := new(FOO)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
func (x FOO) String() string {
|
||||
return proto.EnumName(FOO_name, int32(x))
|
||||
}
|
||||
func (x *FOO) UnmarshalJSON(data []byte) error {
|
||||
value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*x = FOO(value)
|
||||
return nil
|
||||
}
|
||||
|
||||
type Test struct {
|
||||
Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
|
||||
Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
|
||||
Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
|
||||
Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
|
||||
// Types that are valid to be assigned to Union:
|
||||
// *Test_Number
|
||||
// *Test_Name
|
||||
Union isTest_Union `protobuf_oneof:"union"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
func (m *Test) Reset() { *m = Test{} }
|
||||
func (m *Test) String() string { return proto.CompactTextString(m) }
|
||||
func (*Test) ProtoMessage() {}
|
||||
|
||||
type isTest_Union interface {
|
||||
isTest_Union()
|
||||
}
|
||||
|
||||
type Test_Number struct {
|
||||
Number int32 `protobuf:"varint,6,opt,name=number"`
|
||||
}
|
||||
type Test_Name struct {
|
||||
Name string `protobuf:"bytes,7,opt,name=name"`
|
||||
}
|
||||
|
||||
func (*Test_Number) isTest_Union() {}
|
||||
func (*Test_Name) isTest_Union() {}
|
||||
|
||||
func (m *Test) GetUnion() isTest_Union {
|
||||
if m != nil {
|
||||
return m.Union
|
||||
}
|
||||
return nil
|
||||
}
|
||||
const Default_Test_Type int32 = 77
|
||||
|
||||
func (m *Test) GetLabel() string {
|
||||
if m != nil && m.Label != nil {
|
||||
return *m.Label
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Test) GetType() int32 {
|
||||
if m != nil && m.Type != nil {
|
||||
return *m.Type
|
||||
}
|
||||
return Default_Test_Type
|
||||
}
|
||||
|
||||
func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
|
||||
if m != nil {
|
||||
return m.Optionalgroup
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Test_OptionalGroup struct {
|
||||
RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
|
||||
}
|
||||
func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
|
||||
func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
|
||||
|
||||
func (m *Test_OptionalGroup) GetRequiredField() string {
|
||||
if m != nil && m.RequiredField != nil {
|
||||
return *m.RequiredField
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Test) GetNumber() int32 {
|
||||
if x, ok := m.GetUnion().(*Test_Number); ok {
|
||||
return x.Number
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Test) GetName() string {
|
||||
if x, ok := m.GetUnion().(*Test_Name); ok {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
|
||||
}
|
||||
|
||||
To create and play with a Test object:
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
pb "./example.pb"
|
||||
)
|
||||
|
||||
func main() {
|
||||
test := &pb.Test{
|
||||
Label: proto.String("hello"),
|
||||
Type: proto.Int32(17),
|
||||
Reps: []int64{1, 2, 3},
|
||||
Optionalgroup: &pb.Test_OptionalGroup{
|
||||
RequiredField: proto.String("good bye"),
|
||||
},
|
||||
Union: &pb.Test_Name{Name: "fred"},
|
||||
}
|
||||
data, err := proto.Marshal(test)
|
||||
if err != nil {
|
||||
log.Fatal("marshaling error: ", err)
|
||||
}
|
||||
newTest := &pb.Test{}
|
||||
err = proto.Unmarshal(data, newTest)
|
||||
if err != nil {
|
||||
log.Fatal("unmarshaling error: ", err)
|
||||
}
|
||||
// Now test and newTest contain the same data.
|
||||
if test.GetLabel() != newTest.GetLabel() {
|
||||
log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
|
||||
}
|
||||
// Use a type switch to determine which oneof was set.
|
||||
switch u := test.Union.(type) {
|
||||
case *pb.Test_Number: // u.Number contains the number.
|
||||
case *pb.Test_Name: // u.Name contains the string.
|
||||
}
|
||||
// etc.
|
||||
}
|
||||
*/
|
||||
package proto
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
|
||||
// Marshal reports this when a required field is not initialized.
|
||||
// Unmarshal reports this when a required field is missing from the wire data.
|
||||
type RequiredNotSetError struct{ field string }
|
||||
|
||||
func (e *RequiredNotSetError) Error() string {
|
||||
if e.field == "" {
|
||||
return "proto: required field not set"
|
||||
}
|
||||
return fmt.Sprintf("proto: required field %q not set", e.field)
|
||||
}
|
||||
func (e *RequiredNotSetError) RequiredNotSet() bool {
|
||||
return true
|
||||
}
|
||||
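// Illustrative sketch, not part of the upstream source: callers can detect this
// non-fatal condition after Marshal and decide whether to keep the partial
// encoding. msg is assumed to be a generated message value with required fields.
//
//	data, err := proto.Marshal(msg)
//	if _, ok := err.(*proto.RequiredNotSetError); ok {
//		log.Printf("encoded %d bytes, but a required field was not set", len(data))
//	}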
|
||||
type invalidUTF8Error struct{ field string }
|
||||
|
||||
func (e *invalidUTF8Error) Error() string {
|
||||
if e.field == "" {
|
||||
return "proto: invalid UTF-8 detected"
|
||||
}
|
||||
return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
|
||||
}
|
||||
func (e *invalidUTF8Error) InvalidUTF8() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
|
||||
// This error should not be exposed to the external API as such errors should
|
||||
// be recreated with the field information.
|
||||
var errInvalidUTF8 = &invalidUTF8Error{}
|
||||
|
||||
// isNonFatal reports whether the error is either a RequiredNotSet error
|
||||
// or an InvalidUTF8 error.
|
||||
func isNonFatal(err error) bool {
|
||||
if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
|
||||
return true
|
||||
}
|
||||
if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type nonFatal struct{ E error }
|
||||
|
||||
// Merge merges err into nf and reports whether it was successfully merged.
// It returns false for any fatal (non-nil) error.
|
||||
func (nf *nonFatal) Merge(err error) (ok bool) {
|
||||
if err == nil {
|
||||
return true // not an error
|
||||
}
|
||||
if !isNonFatal(err) {
|
||||
return false // fatal error
|
||||
}
|
||||
if nf.E == nil {
|
||||
nf.E = err // store first instance of non-fatal error
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Message is implemented by generated protocol buffer messages.
|
||||
type Message interface {
|
||||
Reset()
|
||||
String() string
|
||||
ProtoMessage()
|
||||
}
|
||||
|
||||
// A Buffer is a buffer manager for marshaling and unmarshaling
|
||||
// protocol buffers. It may be reused between invocations to
|
||||
// reduce memory usage. It is not necessary to use a Buffer;
|
||||
// the global functions Marshal and Unmarshal create a
|
||||
// temporary Buffer and are fine for most applications.
|
||||
type Buffer struct {
|
||||
buf []byte // encode/decode byte stream
|
||||
index int // read point
|
||||
|
||||
deterministic bool
|
||||
}
|
||||
|
||||
// NewBuffer allocates a new Buffer and initializes its internal data to
|
||||
// the contents of the argument slice.
|
||||
func NewBuffer(e []byte) *Buffer {
|
||||
return &Buffer{buf: e}
|
||||
}
|
||||
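// Illustrative sketch, not part of the upstream source: decoding from a Buffer
// and then reusing it for encoding. wire is assumed to hold an encoded message,
// and out and msg to be generated message values.
//
//	buf := proto.NewBuffer(wire)
//	out := &pb.Test{}
//	if err := buf.Unmarshal(out); err != nil {
//		log.Fatal(err)
//	}
//
//	buf.Reset()
//	if err := buf.Marshal(msg); err != nil {
//		log.Fatal(err)
//	}
//	encoded := buf.Bytes()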
|
||||
// Reset resets the Buffer, ready for marshaling a new protocol buffer.
|
||||
func (p *Buffer) Reset() {
|
||||
p.buf = p.buf[0:0] // for reading/writing
|
||||
p.index = 0 // for reading
|
||||
}
|
||||
|
||||
// SetBuf replaces the internal buffer with the slice,
|
||||
// ready for unmarshaling the contents of the slice.
|
||||
func (p *Buffer) SetBuf(s []byte) {
|
||||
p.buf = s
|
||||
p.index = 0
|
||||
}
|
||||
|
||||
// Bytes returns the contents of the Buffer.
|
||||
func (p *Buffer) Bytes() []byte { return p.buf }
|
||||
|
||||
// SetDeterministic sets whether to use deterministic serialization.
|
||||
//
|
||||
// Deterministic serialization guarantees that for a given binary, equal
|
||||
// messages will always be serialized to the same bytes. This implies:
|
||||
//
|
||||
// - Repeated serialization of a message will return the same bytes.
|
||||
// - Different processes of the same binary (which may be executing on
|
||||
// different machines) will serialize equal messages to the same bytes.
|
||||
//
|
||||
// Note that the deterministic serialization is NOT canonical across
|
||||
// languages. It is not guaranteed to remain stable over time. It is unstable
|
||||
// across different builds with schema changes due to unknown fields.
|
||||
// Users who need canonical serialization (e.g., persistent storage in a
|
||||
// canonical form, fingerprinting, etc.) should define their own
|
||||
// canonicalization specification and implement their own serializer rather
|
||||
// than relying on this API.
|
||||
//
|
||||
// If deterministic serialization is requested, map entries will be sorted
|
||||
// by keys in lexicographical order. This is an implementation detail and
|
||||
// subject to change.
|
||||
func (p *Buffer) SetDeterministic(deterministic bool) {
|
||||
p.deterministic = deterministic
|
||||
}
|
||||
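// Illustrative sketch, not part of the upstream source: requesting deterministic
// output so that equal messages (including map fields) marshal to identical
// bytes within this binary. msg is assumed to be a generated message value.
//
//	var buf proto.Buffer
//	buf.SetDeterministic(true)
//	if err := buf.Marshal(msg); err != nil {
//		log.Fatal(err)
//	}
//	wire := buf.Bytes()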
|
||||
/*
|
||||
* Helper routines for simplifying the creation of optional fields of basic type.
|
||||
*/
|
||||
|
||||
// Bool is a helper routine that allocates a new bool value
|
||||
// to store v and returns a pointer to it.
|
||||
func Bool(v bool) *bool {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Int32 is a helper routine that allocates a new int32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Int32(v int32) *int32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Int is a helper routine that allocates a new int32 value
|
||||
// to store v and returns a pointer to it, but unlike Int32
|
||||
// its argument value is an int.
|
||||
func Int(v int) *int32 {
|
||||
p := new(int32)
|
||||
*p = int32(v)
|
||||
return p
|
||||
}
|
||||
|
||||
// Int64 is a helper routine that allocates a new int64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Int64(v int64) *int64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Float32 is a helper routine that allocates a new float32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Float32(v float32) *float32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Float64 is a helper routine that allocates a new float64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Float64(v float64) *float64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Uint32 is a helper routine that allocates a new uint32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Uint32(v uint32) *uint32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Uint64 is a helper routine that allocates a new uint64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Uint64(v uint64) *uint64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// String is a helper routine that allocates a new string value
|
||||
// to store v and returns a pointer to it.
|
||||
func String(v string) *string {
|
||||
return &v
|
||||
}
|
||||
|
||||
// EnumName is a helper function to simplify printing protocol buffer enums
|
||||
// by name. Given an enum map and a value, it returns a useful string.
|
||||
func EnumName(m map[int32]string, v int32) string {
|
||||
s, ok := m[v]
|
||||
if ok {
|
||||
return s
|
||||
}
|
||||
return strconv.Itoa(int(v))
|
||||
}
|
||||
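// Illustrative sketch, not part of the upstream source, using the FOO_name map
// from the generated example in the package documentation:
//
//	proto.EnumName(FOO_name, 17) // "X"
//	proto.EnumName(FOO_name, 42) // "42" (unknown values fall back to the decimal string)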
|
||||
// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
|
||||
// from their JSON-encoded representation. Given a map from the enum's symbolic
|
||||
// names to its int values, and a byte buffer containing the JSON-encoded
|
||||
// value, it returns an int32 that can be cast to the enum type by the caller.
|
||||
//
|
||||
// The function can deal with both JSON representations, numeric and symbolic.
|
||||
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
|
||||
if data[0] == '"' {
|
||||
// New style: enums are strings.
|
||||
var repr string
|
||||
if err := json.Unmarshal(data, &repr); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
val, ok := m[repr]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
// Old style: enums are ints.
|
||||
var val int32
|
||||
if err := json.Unmarshal(data, &val); err != nil {
|
||||
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
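// Illustrative sketch, not part of the upstream source, using the FOO_value map
// from the package documentation:
//
//	v, err := proto.UnmarshalJSONEnum(FOO_value, []byte(`"X"`), "FOO") // v == 17
//	v, err = proto.UnmarshalJSONEnum(FOO_value, []byte(`17`), "FOO")   // numeric form is also accepted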
|
||||
// DebugPrint dumps the encoded data in b in a debugging format with a header
|
||||
// including the string s. Used in testing but made available for general debugging.
|
||||
func (p *Buffer) DebugPrint(s string, b []byte) {
|
||||
var u uint64
|
||||
|
||||
obuf := p.buf
|
||||
index := p.index
|
||||
p.buf = b
|
||||
p.index = 0
|
||||
depth := 0
|
||||
|
||||
fmt.Printf("\n--- %s ---\n", s)
|
||||
|
||||
out:
|
||||
for {
|
||||
for i := 0; i < depth; i++ {
|
||||
fmt.Print(" ")
|
||||
}
|
||||
|
||||
index := p.index
|
||||
if index == len(p.buf) {
|
||||
break
|
||||
}
|
||||
|
||||
op, err := p.DecodeVarint()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: fetching op err %v\n", index, err)
|
||||
break out
|
||||
}
|
||||
tag := op >> 3
|
||||
wire := op & 7
|
||||
|
||||
switch wire {
|
||||
default:
|
||||
fmt.Printf("%3d: t=%3d unknown wire=%d\n",
|
||||
index, tag, wire)
|
||||
break out
|
||||
|
||||
case WireBytes:
|
||||
var r []byte
|
||||
|
||||
r, err = p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
|
||||
if len(r) <= 6 {
|
||||
for i := 0; i < len(r); i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < 3; i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
fmt.Printf(" ..")
|
||||
for i := len(r) - 3; i < len(r); i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
|
||||
case WireFixed32:
|
||||
u, err = p.DecodeFixed32()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
|
||||
|
||||
case WireFixed64:
|
||||
u, err = p.DecodeFixed64()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
|
||||
|
||||
case WireVarint:
|
||||
u, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
|
||||
|
||||
case WireStartGroup:
|
||||
fmt.Printf("%3d: t=%3d start\n", index, tag)
|
||||
depth++
|
||||
|
||||
case WireEndGroup:
|
||||
depth--
|
||||
fmt.Printf("%3d: t=%3d end\n", index, tag)
|
||||
}
|
||||
}
|
||||
|
||||
if depth != 0 {
|
||||
fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
|
||||
p.buf = obuf
|
||||
p.index = index
|
||||
}
|
||||
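// Illustrative sketch, not part of the upstream source: dumping the wire format
// of an encoded message for inspection. msg is assumed to be a generated
// message value.
//
//	wire, err := proto.Marshal(msg)
//	if err != nil {
//		log.Fatal(err)
//	}
//	new(proto.Buffer).DebugPrint("msg wire data", wire)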
|
||||
// SetDefaults sets unset protocol buffer fields to their default values.
|
||||
// It only modifies fields that are both unset and have defined defaults.
|
||||
// It recursively sets default values in any non-nil sub-messages.
|
||||
func SetDefaults(pb Message) {
|
||||
setDefaults(reflect.ValueOf(pb), true, false)
|
||||
}
|
||||
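// Illustrative sketch, not part of the upstream source, using the pb.Test type
// from the package documentation, whose "type" field declares a default of 77:
//
//	t := &pb.Test{Label: proto.String("x")}
//	proto.SetDefaults(t)
//	fmt.Println(*t.Type) // 77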
|
||||
// v is a pointer to a struct.
|
||||
func setDefaults(v reflect.Value, recur, zeros bool) {
|
||||
v = v.Elem()
|
||||
|
||||
defaultMu.RLock()
|
||||
dm, ok := defaults[v.Type()]
|
||||
defaultMu.RUnlock()
|
||||
if !ok {
|
||||
dm = buildDefaultMessage(v.Type())
|
||||
defaultMu.Lock()
|
||||
defaults[v.Type()] = dm
|
||||
defaultMu.Unlock()
|
||||
}
|
||||
|
||||
for _, sf := range dm.scalars {
|
||||
f := v.Field(sf.index)
|
||||
if !f.IsNil() {
|
||||
// field already set
|
||||
continue
|
||||
}
|
||||
dv := sf.value
|
||||
if dv == nil && !zeros {
|
||||
// no explicit default, and don't want to set zeros
|
||||
continue
|
||||
}
|
||||
fptr := f.Addr().Interface() // **T
|
||||
// TODO: Consider batching the allocations we do here.
|
||||
switch sf.kind {
|
||||
case reflect.Bool:
|
||||
b := new(bool)
|
||||
if dv != nil {
|
||||
*b = dv.(bool)
|
||||
}
|
||||
*(fptr.(**bool)) = b
|
||||
case reflect.Float32:
|
||||
f := new(float32)
|
||||
if dv != nil {
|
||||
*f = dv.(float32)
|
||||
}
|
||||
*(fptr.(**float32)) = f
|
||||
case reflect.Float64:
|
||||
f := new(float64)
|
||||
if dv != nil {
|
||||
*f = dv.(float64)
|
||||
}
|
||||
*(fptr.(**float64)) = f
|
||||
case reflect.Int32:
|
||||
// might be an enum
|
||||
if ft := f.Type(); ft != int32PtrType {
|
||||
// enum
|
||||
f.Set(reflect.New(ft.Elem()))
|
||||
if dv != nil {
|
||||
f.Elem().SetInt(int64(dv.(int32)))
|
||||
}
|
||||
} else {
|
||||
// int32 field
|
||||
i := new(int32)
|
||||
if dv != nil {
|
||||
*i = dv.(int32)
|
||||
}
|
||||
*(fptr.(**int32)) = i
|
||||
}
|
||||
case reflect.Int64:
|
||||
i := new(int64)
|
||||
if dv != nil {
|
||||
*i = dv.(int64)
|
||||
}
|
||||
*(fptr.(**int64)) = i
|
||||
case reflect.String:
|
||||
s := new(string)
|
||||
if dv != nil {
|
||||
*s = dv.(string)
|
||||
}
|
||||
*(fptr.(**string)) = s
|
||||
case reflect.Uint8:
|
||||
// exceptional case: []byte
|
||||
var b []byte
|
||||
if dv != nil {
|
||||
db := dv.([]byte)
|
||||
b = make([]byte, len(db))
|
||||
copy(b, db)
|
||||
} else {
|
||||
b = []byte{}
|
||||
}
|
||||
*(fptr.(*[]byte)) = b
|
||||
case reflect.Uint32:
|
||||
u := new(uint32)
|
||||
if dv != nil {
|
||||
*u = dv.(uint32)
|
||||
}
|
||||
*(fptr.(**uint32)) = u
|
||||
case reflect.Uint64:
|
||||
u := new(uint64)
|
||||
if dv != nil {
|
||||
*u = dv.(uint64)
|
||||
}
|
||||
*(fptr.(**uint64)) = u
|
||||
default:
|
||||
log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
|
||||
}
|
||||
}
|
||||
|
||||
for _, ni := range dm.nested {
|
||||
f := v.Field(ni)
|
||||
// f is *T or []*T or map[T]*T
|
||||
switch f.Kind() {
|
||||
case reflect.Ptr:
|
||||
if f.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(f, recur, zeros)
|
||||
|
||||
case reflect.Slice:
|
||||
for i := 0; i < f.Len(); i++ {
|
||||
e := f.Index(i)
|
||||
if e.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(e, recur, zeros)
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
for _, k := range f.MapKeys() {
|
||||
e := f.MapIndex(k)
|
||||
if e.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(e, recur, zeros)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
// defaults maps a protocol buffer struct type to its defaultMessage, which
// lists its scalar fields (with their proto-declared non-zero default values,
// if any) and its nested message fields.
|
||||
defaultMu sync.RWMutex
|
||||
defaults = make(map[reflect.Type]defaultMessage)
|
||||
|
||||
int32PtrType = reflect.TypeOf((*int32)(nil))
|
||||
)
|
||||
|
||||
// defaultMessage represents information about the default values of a message.
|
||||
type defaultMessage struct {
|
||||
scalars []scalarField
|
||||
nested []int // struct field index of nested messages
|
||||
}
|
||||
|
||||
type scalarField struct {
|
||||
index int // struct field index
|
||||
kind reflect.Kind // element type (the T in *T or []T)
|
||||
value interface{} // the proto-declared default value, or nil
|
||||
}
|
||||
|
||||
// t is a struct type.
|
||||
func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
|
||||
sprop := GetProperties(t)
|
||||
for _, prop := range sprop.Prop {
|
||||
fi, ok := sprop.decoderTags.get(prop.Tag)
|
||||
if !ok {
|
||||
// XXX_unrecognized
|
||||
continue
|
||||
}
|
||||
ft := t.Field(fi).Type
|
||||
|
||||
sf, nested, err := fieldDefault(ft, prop)
|
||||
switch {
|
||||
case err != nil:
|
||||
log.Print(err)
|
||||
case nested:
|
||||
dm.nested = append(dm.nested, fi)
|
||||
case sf != nil:
|
||||
sf.index = fi
|
||||
dm.scalars = append(dm.scalars, *sf)
|
||||
}
|
||||
}
|
||||
|
||||
return dm
|
||||
}
|
||||
|
||||
// fieldDefault returns the scalarField for field type ft.
|
||||
// sf will be nil if the field can not have a default.
|
||||
// nestedMessage will be true if this is a nested message.
|
||||
// Note that sf.index is not set on return.
|
||||
func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
|
||||
var canHaveDefault bool
|
||||
switch ft.Kind() {
|
||||
case reflect.Ptr:
|
||||
if ft.Elem().Kind() == reflect.Struct {
|
||||
nestedMessage = true
|
||||
} else {
|
||||
canHaveDefault = true // proto2 scalar field
|
||||
}
|
||||
|
||||
case reflect.Slice:
|
||||
switch ft.Elem().Kind() {
|
||||
case reflect.Ptr:
|
||||
nestedMessage = true // repeated message
|
||||
case reflect.Uint8:
|
||||
canHaveDefault = true // bytes field
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
if ft.Elem().Kind() == reflect.Ptr {
|
||||
nestedMessage = true // map with message values
|
||||
}
|
||||
}
|
||||
|
||||
if !canHaveDefault {
|
||||
if nestedMessage {
|
||||
return nil, true, nil
|
||||
}
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
// We now know that ft is a pointer or slice.
|
||||
sf = &scalarField{kind: ft.Elem().Kind()}
|
||||
|
||||
// scalar fields without defaults
|
||||
if !prop.HasDefault {
|
||||
return sf, false, nil
|
||||
}
|
||||
|
||||
// a scalar field: either *T or []byte
|
||||
switch ft.Elem().Kind() {
|
||||
case reflect.Bool:
|
||||
x, err := strconv.ParseBool(prop.Default)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.Float32:
|
||||
x, err := strconv.ParseFloat(prop.Default, 32)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = float32(x)
|
||||
case reflect.Float64:
|
||||
x, err := strconv.ParseFloat(prop.Default, 64)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.Int32:
|
||||
x, err := strconv.ParseInt(prop.Default, 10, 32)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = int32(x)
|
||||
case reflect.Int64:
|
||||
x, err := strconv.ParseInt(prop.Default, 10, 64)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.String:
|
||||
sf.value = prop.Default
|
||||
case reflect.Uint8:
|
||||
// []byte (not *uint8)
|
||||
sf.value = []byte(prop.Default)
|
||||
case reflect.Uint32:
|
||||
x, err := strconv.ParseUint(prop.Default, 10, 32)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = uint32(x)
|
||||
case reflect.Uint64:
|
||||
x, err := strconv.ParseUint(prop.Default, 10, 64)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
default:
|
||||
return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
|
||||
}
|
||||
|
||||
return sf, false, nil
|
||||
}
|
||||
|
||||
// mapKeys returns a sort.Interface to be used for sorting the map keys.
|
||||
// Map fields may have key types of non-float scalars, strings and enums.
|
||||
func mapKeys(vs []reflect.Value) sort.Interface {
|
||||
s := mapKeySorter{vs: vs}
|
||||
|
||||
// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
|
||||
if len(vs) == 0 {
|
||||
return s
|
||||
}
|
||||
switch vs[0].Kind() {
|
||||
case reflect.Int32, reflect.Int64:
|
||||
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
|
||||
case reflect.Bool:
|
||||
s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
|
||||
case reflect.String:
|
||||
s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
|
||||
default:
|
||||
panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
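// Illustrative in-package sketch, not part of the upstream source: how an
// encoder obtains a deterministic iteration order for a map field held in the
// reflect.Value v.
//
//	keys := v.MapKeys()
//	sort.Sort(mapKeys(keys))
//	for _, k := range keys {
//		// emit v.MapIndex(k) ...
//	}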
|
||||
type mapKeySorter struct {
|
||||
vs []reflect.Value
|
||||
less func(a, b reflect.Value) bool
|
||||
}
|
||||
|
||||
func (s mapKeySorter) Len() int { return len(s.vs) }
|
||||
func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
|
||||
func (s mapKeySorter) Less(i, j int) bool {
|
||||
return s.less(s.vs[i], s.vs[j])
|
||||
}
|
||||
|
||||
// isProto3Zero reports whether v is a zero proto3 value.
|
||||
func isProto3Zero(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
return v.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.String:
|
||||
return v.String() == ""
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
const (
|
||||
// ProtoPackageIsVersion3 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
ProtoPackageIsVersion3 = true
|
||||
|
||||
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
ProtoPackageIsVersion2 = true
|
||||
|
||||
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
ProtoPackageIsVersion1 = true
|
||||
)
|
||||
|
||||
// InternalMessageInfo is a type used internally by generated .pb.go files.
|
||||
// This type is not intended to be used by non-generated code.
|
||||
// This type is not subject to any compatibility guarantee.
|
||||
type InternalMessageInfo struct {
|
||||
marshal *marshalInfo
|
||||
unmarshal *unmarshalInfo
|
||||
merge *mergeInfo
|
||||
discard *discardInfo
|
||||
}
|
@ -0,0 +1,181 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Support for message sets.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
|
||||
// A message type ID is required for storing a protocol buffer in a message set.
|
||||
var errNoMessageTypeID = errors.New("proto does not have a message type ID")
|
||||
|
||||
// The first two types (_MessageSet_Item and messageSet)
|
||||
// model what the protocol compiler produces for the following protocol message:
|
||||
// message MessageSet {
|
||||
// repeated group Item = 1 {
|
||||
// required int32 type_id = 2;
|
||||
// required string message = 3;
|
||||
// };
|
||||
// }
|
||||
// That is the MessageSet wire format. We can't use a proto to generate these
|
||||
// because that would introduce a circular dependency between it and this package.
|
||||
|
||||
type _MessageSet_Item struct {
|
||||
TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
|
||||
Message []byte `protobuf:"bytes,3,req,name=message"`
|
||||
}
|
||||
|
||||
type messageSet struct {
|
||||
Item []*_MessageSet_Item `protobuf:"group,1,rep"`
|
||||
XXX_unrecognized []byte
|
||||
// TODO: caching?
|
||||
}
|
||||
|
||||
// Make sure messageSet is a Message.
|
||||
var _ Message = (*messageSet)(nil)
|
||||
|
||||
// messageTypeIder is an interface satisfied by a protocol buffer type
|
||||
// that may be stored in a MessageSet.
|
||||
type messageTypeIder interface {
|
||||
MessageTypeId() int32
|
||||
}
|
||||
|
||||
func (ms *messageSet) find(pb Message) *_MessageSet_Item {
|
||||
mti, ok := pb.(messageTypeIder)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
id := mti.MessageTypeId()
|
||||
for _, item := range ms.Item {
|
||||
if *item.TypeId == id {
|
||||
return item
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *messageSet) Has(pb Message) bool {
|
||||
return ms.find(pb) != nil
|
||||
}
|
||||
|
||||
func (ms *messageSet) Unmarshal(pb Message) error {
|
||||
if item := ms.find(pb); item != nil {
|
||||
return Unmarshal(item.Message, pb)
|
||||
}
|
||||
if _, ok := pb.(messageTypeIder); !ok {
|
||||
return errNoMessageTypeID
|
||||
}
|
||||
return nil // TODO: return error instead?
|
||||
}
|
||||
|
||||
func (ms *messageSet) Marshal(pb Message) error {
|
||||
msg, err := Marshal(pb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if item := ms.find(pb); item != nil {
|
||||
// reuse existing item
|
||||
item.Message = msg
|
||||
return nil
|
||||
}
|
||||
|
||||
mti, ok := pb.(messageTypeIder)
|
||||
if !ok {
|
||||
return errNoMessageTypeID
|
||||
}
|
||||
|
||||
mtid := mti.MessageTypeId()
|
||||
ms.Item = append(ms.Item, &_MessageSet_Item{
|
||||
TypeId: &mtid,
|
||||
Message: msg,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *messageSet) Reset() { *ms = messageSet{} }
|
||||
func (ms *messageSet) String() string { return CompactTextString(ms) }
|
||||
func (*messageSet) ProtoMessage() {}
|
||||
|
||||
// Support for the message_set_wire_format message option.
|
||||
|
||||
func skipVarint(buf []byte) []byte {
|
||||
i := 0
|
||||
for ; buf[i]&0x80 != 0; i++ {
|
||||
}
|
||||
return buf[i+1:]
|
||||
}
|
||||
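// Illustrative in-package sketch, not part of the upstream source: skipVarint
// drops the leading varint (here 300, encoded as 0xac 0x02) and returns the
// remaining bytes.
//
//	b := append(EncodeVarint(300), 0xaa)
//	rest := skipVarint(b) // rest == []byte{0xaa}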
|
||||
// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
|
||||
// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func unmarshalMessageSet(buf []byte, exts interface{}) error {
|
||||
var m map[int32]Extension
|
||||
switch exts := exts.(type) {
|
||||
case *XXX_InternalExtensions:
|
||||
m = exts.extensionsWrite()
|
||||
case map[int32]Extension:
|
||||
m = exts
|
||||
default:
|
||||
return errors.New("proto: not an extension map")
|
||||
}
|
||||
|
||||
ms := new(messageSet)
|
||||
if err := Unmarshal(buf, ms); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, item := range ms.Item {
|
||||
id := *item.TypeId
|
||||
msg := item.Message
|
||||
|
||||
// Restore wire type and field number varint, plus length varint.
|
||||
// Be careful to preserve duplicate items.
|
||||
b := EncodeVarint(uint64(id)<<3 | WireBytes)
|
||||
if ext, ok := m[id]; ok {
|
||||
// Existing data; rip off the tag and length varint
|
||||
// so we join the new data correctly.
|
||||
// We can assume that ext.enc is set because we are unmarshaling.
|
||||
o := ext.enc[len(b):] // skip wire type and field number
|
||||
_, n := DecodeVarint(o) // calculate length of length varint
|
||||
o = o[n:] // skip length varint
|
||||
msg = append(o, msg...) // join old data and new data
|
||||
}
|
||||
b = append(b, EncodeVarint(uint64(len(msg)))...)
|
||||
b = append(b, msg...)
|
||||
|
||||
m[id] = Extension{enc: b}
|
||||
}
|
||||
return nil
|
||||
}
|
@ -0,0 +1,360 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// +build purego appengine js
|
||||
|
||||
// This file contains an implementation of proto field accesses using package reflect.
|
||||
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
|
||||
// be used on App Engine.
|
||||
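// Note, not part of the upstream source: building with the purego build tag,
// for example `go build -tags purego`, selects this reflect-based
// implementation instead of the one in pointer_unsafe.go.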
|
||||
package proto
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const unsafeAllowed = false
|
||||
|
||||
// A field identifies a field in a struct, accessible from a pointer.
|
||||
// In this implementation, a field is identified by the sequence of field indices
|
||||
// passed to reflect's FieldByIndex.
|
||||
type field []int
|
||||
|
||||
// toField returns a field equivalent to the given reflect field.
|
||||
func toField(f *reflect.StructField) field {
|
||||
return f.Index
|
||||
}
|
||||
|
||||
// invalidField is an invalid field identifier.
|
||||
var invalidField = field(nil)
|
||||
|
||||
// zeroField is a noop when calling pointer.offset.
|
||||
var zeroField = field([]int{})
|
||||
|
||||
// IsValid reports whether the field identifier is valid.
|
||||
func (f field) IsValid() bool { return f != nil }
|
||||
|
||||
// The pointer type is for the table-driven decoder.
|
||||
// The implementation here uses a reflect.Value of pointer type to
|
||||
// create a generic pointer. In pointer_unsafe.go we use unsafe
|
||||
// instead of reflect to implement the same (but faster) interface.
|
||||
type pointer struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
// toPointer converts an interface of pointer type to a pointer
|
||||
// that points to the same target.
|
||||
func toPointer(i *Message) pointer {
|
||||
return pointer{v: reflect.ValueOf(*i)}
|
||||
}
|
||||
|
||||
// toAddrPointer converts an interface to a pointer that points to
|
||||
// the interface data.
|
||||
func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
|
||||
v := reflect.ValueOf(*i)
|
||||
u := reflect.New(v.Type())
|
||||
u.Elem().Set(v)
|
||||
if deref {
|
||||
u = u.Elem()
|
||||
}
|
||||
return pointer{v: u}
|
||||
}
|
||||
|
||||
// valToPointer converts v to a pointer. v must be of pointer type.
|
||||
func valToPointer(v reflect.Value) pointer {
|
||||
return pointer{v: v}
|
||||
}
|
||||
|
||||
// offset converts from a pointer to a structure to a pointer to
|
||||
// one of its fields.
|
||||
func (p pointer) offset(f field) pointer {
|
||||
return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
|
||||
}
|
||||
|
||||
func (p pointer) isNil() bool {
|
||||
return p.v.IsNil()
|
||||
}
|
||||
|
||||
// grow updates the slice s in place to make it one element longer.
|
||||
// s must be addressable.
|
||||
// Returns the (addressable) new element.
|
||||
func grow(s reflect.Value) reflect.Value {
|
||||
n, m := s.Len(), s.Cap()
|
||||
if n < m {
|
||||
s.SetLen(n + 1)
|
||||
} else {
|
||||
s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
|
||||
}
|
||||
return s.Index(n)
|
||||
}
|
||||
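// Illustrative in-package sketch, not part of the upstream source:
//
//	s := reflect.ValueOf(&[]int64{1, 2}).Elem() // addressable slice value
//	grow(s).SetInt(3)                           // s now holds [1 2 3]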
|
||||
func (p pointer) toInt64() *int64 {
|
||||
return p.v.Interface().(*int64)
|
||||
}
|
||||
func (p pointer) toInt64Ptr() **int64 {
|
||||
return p.v.Interface().(**int64)
|
||||
}
|
||||
func (p pointer) toInt64Slice() *[]int64 {
|
||||
return p.v.Interface().(*[]int64)
|
||||
}
|
||||
|
||||
var int32ptr = reflect.TypeOf((*int32)(nil))
|
||||
|
||||
func (p pointer) toInt32() *int32 {
|
||||
return p.v.Convert(int32ptr).Interface().(*int32)
|
||||
}
|
||||
|
||||
// The toInt32Ptr/Slice methods don't work because of enums.
|
||||
// Instead, we must use set/get methods for the int32ptr/slice case.
|
||||
/*
|
||||
func (p pointer) toInt32Ptr() **int32 {
|
||||
return p.v.Interface().(**int32)
|
||||
}
|
||||
func (p pointer) toInt32Slice() *[]int32 {
|
||||
return p.v.Interface().(*[]int32)
|
||||
}
|
||||
*/
|
||||
func (p pointer) getInt32Ptr() *int32 {
|
||||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
|
||||
// raw int32 type
|
||||
return p.v.Elem().Interface().(*int32)
|
||||
}
|
||||
// an enum
|
||||
return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
|
||||
}
|
||||
func (p pointer) setInt32Ptr(v int32) {
|
||||
// Allocate value in a *int32. Possibly convert that to a *enum.
|
||||
// Then assign it to a **int32 or **enum.
|
||||
// Note: we can convert *int32 to *enum, but we can't convert
|
||||
// **int32 to **enum!
|
||||
p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
|
||||
}
|
||||
|
||||
// getInt32Slice copies []int32 from p as a new slice.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) getInt32Slice() []int32 {
|
||||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
|
||||
// raw int32 type
|
||||
return p.v.Elem().Interface().([]int32)
|
||||
}
|
||||
// an enum
|
||||
// Allocate a []int32, then assign []enum's values into it.
|
||||
// Note: we can't convert []enum to []int32.
|
||||
slice := p.v.Elem()
|
||||
s := make([]int32, slice.Len())
|
||||
for i := 0; i < slice.Len(); i++ {
|
||||
s[i] = int32(slice.Index(i).Int())
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// setInt32Slice copies []int32 into p as a new slice.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) setInt32Slice(v []int32) {
|
||||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
|
||||
// raw int32 type
|
||||
p.v.Elem().Set(reflect.ValueOf(v))
|
||||
return
|
||||
}
|
||||
// an enum
|
||||
// Allocate a []enum, then assign []int32's values into it.
|
||||
// Note: we can't convert []enum to []int32.
|
||||
slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
|
||||
for i, x := range v {
|
||||
slice.Index(i).SetInt(int64(x))
|
||||
}
|
||||
p.v.Elem().Set(slice)
|
||||
}
|
||||
func (p pointer) appendInt32Slice(v int32) {
|
||||
grow(p.v.Elem()).SetInt(int64(v))
|
||||
}
|
||||
|
||||
func (p pointer) toUint64() *uint64 {
|
||||
return p.v.Interface().(*uint64)
|
||||
}
|
||||
func (p pointer) toUint64Ptr() **uint64 {
|
||||
return p.v.Interface().(**uint64)
|
||||
}
|
||||
func (p pointer) toUint64Slice() *[]uint64 {
|
||||
return p.v.Interface().(*[]uint64)
|
||||
}
|
||||
func (p pointer) toUint32() *uint32 {
|
||||
return p.v.Interface().(*uint32)
|
||||
}
|
||||
func (p pointer) toUint32Ptr() **uint32 {
|
||||
return p.v.Interface().(**uint32)
|
||||
}
|
||||
func (p pointer) toUint32Slice() *[]uint32 {
|
||||
return p.v.Interface().(*[]uint32)
|
||||
}
|
||||
func (p pointer) toBool() *bool {
|
||||
return p.v.Interface().(*bool)
|
||||
}
|
||||
func (p pointer) toBoolPtr() **bool {
|
||||
return p.v.Interface().(**bool)
|
||||
}
|
||||
func (p pointer) toBoolSlice() *[]bool {
|
||||
return p.v.Interface().(*[]bool)
|
||||
}
|
||||
func (p pointer) toFloat64() *float64 {
|
||||
return p.v.Interface().(*float64)
|
||||
}
|
||||
func (p pointer) toFloat64Ptr() **float64 {
|
||||
return p.v.Interface().(**float64)
|
||||
}
|
||||
func (p pointer) toFloat64Slice() *[]float64 {
|
||||
return p.v.Interface().(*[]float64)
|
||||
}
|
||||
func (p pointer) toFloat32() *float32 {
|
||||
return p.v.Interface().(*float32)
|
||||
}
|
||||
func (p pointer) toFloat32Ptr() **float32 {
|
||||
return p.v.Interface().(**float32)
|
||||
}
|
||||
func (p pointer) toFloat32Slice() *[]float32 {
|
||||
return p.v.Interface().(*[]float32)
|
||||
}
|
||||
func (p pointer) toString() *string {
|
||||
return p.v.Interface().(*string)
|
||||
}
|
||||
func (p pointer) toStringPtr() **string {
|
||||
return p.v.Interface().(**string)
|
||||
}
|
||||
func (p pointer) toStringSlice() *[]string {
|
||||
return p.v.Interface().(*[]string)
|
||||
}
|
||||
func (p pointer) toBytes() *[]byte {
|
||||
return p.v.Interface().(*[]byte)
|
||||
}
|
||||
func (p pointer) toBytesSlice() *[][]byte {
|
||||
return p.v.Interface().(*[][]byte)
|
||||
}
|
||||
func (p pointer) toExtensions() *XXX_InternalExtensions {
|
||||
return p.v.Interface().(*XXX_InternalExtensions)
|
||||
}
|
||||
func (p pointer) toOldExtensions() *map[int32]Extension {
|
||||
return p.v.Interface().(*map[int32]Extension)
|
||||
}
|
||||
func (p pointer) getPointer() pointer {
|
||||
return pointer{v: p.v.Elem()}
|
||||
}
|
||||
func (p pointer) setPointer(q pointer) {
|
||||
p.v.Elem().Set(q.v)
|
||||
}
|
||||
func (p pointer) appendPointer(q pointer) {
|
||||
grow(p.v.Elem()).Set(q.v)
|
||||
}
|
||||
|
||||
// getPointerSlice copies []*T from p as a new []pointer.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) getPointerSlice() []pointer {
|
||||
if p.v.IsNil() {
|
||||
return nil
|
||||
}
|
||||
n := p.v.Elem().Len()
|
||||
s := make([]pointer, n)
|
||||
for i := 0; i < n; i++ {
|
||||
s[i] = pointer{v: p.v.Elem().Index(i)}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// setPointerSlice copies []pointer into p as a new []*T.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) setPointerSlice(v []pointer) {
|
||||
if v == nil {
|
||||
p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
|
||||
return
|
||||
}
|
||||
s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
|
||||
for _, p := range v {
|
||||
s = reflect.Append(s, p.v)
|
||||
}
|
||||
p.v.Elem().Set(s)
|
||||
}
|
||||
|
||||
// getInterfacePointer returns a pointer that points to the
|
||||
// interface data of the interface pointed by p.
|
||||
func (p pointer) getInterfacePointer() pointer {
|
||||
if p.v.Elem().IsNil() {
|
||||
return pointer{v: p.v.Elem()}
|
||||
}
|
||||
return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
|
||||
}
|
||||
|
||||
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
|
||||
// TODO: check that p.v.Type().Elem() == t?
|
||||
return p.v
|
||||
}
|
||||
|
||||
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
|
||||
var atomicLock sync.Mutex
|
@ -0,0 +1,313 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// +build !purego,!appengine,!js
|
||||
|
||||
// This file contains the implementation of the proto field accesses using package unsafe.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const unsafeAllowed = true
|
||||
|
||||
// A field identifies a field in a struct, accessible from a pointer.
|
||||
// In this implementation, a field is identified by its byte offset from the start of the struct.
|
||||
type field uintptr
|
||||
|
||||
// toField returns a field equivalent to the given reflect field.
|
||||
func toField(f *reflect.StructField) field {
|
||||
return field(f.Offset)
|
||||
}
|
||||
|
||||
// invalidField is an invalid field identifier.
|
||||
const invalidField = ^field(0)
|
||||
|
||||
// zeroField is a noop when calling pointer.offset.
|
||||
const zeroField = field(0)
|
||||
|
||||
// IsValid reports whether the field identifier is valid.
|
||||
func (f field) IsValid() bool {
|
||||
return f != invalidField
|
||||
}
|
||||
|
||||
// The pointer type below is for the new table-driven encoder/decoder.
|
||||
// The implementation here uses unsafe.Pointer to create a generic pointer.
|
||||
// In pointer_reflect.go we use reflect instead of unsafe to implement
|
||||
// the same (but slower) interface.
|
||||
type pointer struct {
|
||||
p unsafe.Pointer
|
||||
}
|
||||
|
||||
// size of pointer
|
||||
var ptrSize = unsafe.Sizeof(uintptr(0))
|
||||
|
||||
// toPointer converts an interface of pointer type to a pointer
|
||||
// that points to the same target.
|
||||
func toPointer(i *Message) pointer {
|
||||
// Super-tricky - read pointer out of data word of interface value.
|
||||
// Saves ~25ns over the equivalent:
|
||||
// return valToPointer(reflect.ValueOf(*i))
|
||||
return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
|
||||
}
|
||||
|
||||
// toAddrPointer converts an interface to a pointer that points to
|
||||
// the interface data.
|
||||
func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
|
||||
// Super-tricky - read or get the address of data word of interface value.
|
||||
if isptr {
|
||||
// The interface is of pointer type, thus it is a direct interface.
|
||||
// The data word is the pointer data itself. We take its address.
|
||||
p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
|
||||
} else {
|
||||
// The interface is not of pointer type. The data word is the pointer
|
||||
// to the data.
|
||||
p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
|
||||
}
|
||||
if deref {
|
||||
p.p = *(*unsafe.Pointer)(p.p)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// valToPointer converts v to a pointer. v must be of pointer type.
|
||||
func valToPointer(v reflect.Value) pointer {
|
||||
return pointer{p: unsafe.Pointer(v.Pointer())}
|
||||
}
|
||||
|
||||
// offset converts from a pointer to a structure to a pointer to
|
||||
// one of its fields.
|
||||
func (p pointer) offset(f field) pointer {
|
||||
// For safety we should panic if !f.IsValid; however, calling panic keeps
// this method from being inlined, which is a serious performance cost.
|
||||
/*
|
||||
if !f.IsValid() {
|
||||
panic("invalid field")
|
||||
}
|
||||
*/
|
||||
return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
|
||||
}
|
||||
|
||||
func (p pointer) isNil() bool {
|
||||
return p.p == nil
|
||||
}
|
||||
|
||||
func (p pointer) toInt64() *int64 {
|
||||
return (*int64)(p.p)
|
||||
}
|
||||
func (p pointer) toInt64Ptr() **int64 {
|
||||
return (**int64)(p.p)
|
||||
}
|
||||
func (p pointer) toInt64Slice() *[]int64 {
|
||||
return (*[]int64)(p.p)
|
||||
}
|
||||
func (p pointer) toInt32() *int32 {
|
||||
return (*int32)(p.p)
|
||||
}
|
||||
|
||||
// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
|
||||
/*
|
||||
func (p pointer) toInt32Ptr() **int32 {
|
||||
return (**int32)(p.p)
|
||||
}
|
||||
func (p pointer) toInt32Slice() *[]int32 {
|
||||
return (*[]int32)(p.p)
|
||||
}
|
||||
*/
|
||||
func (p pointer) getInt32Ptr() *int32 {
|
||||
return *(**int32)(p.p)
|
||||
}
|
||||
func (p pointer) setInt32Ptr(v int32) {
|
||||
*(**int32)(p.p) = &v
|
||||
}
|
||||
|
||||
// getInt32Slice loads a []int32 from p.
|
||||
// The value returned is aliased with the original slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) getInt32Slice() []int32 {
|
||||
return *(*[]int32)(p.p)
|
||||
}
|
||||
|
||||
// setInt32Slice stores a []int32 to p.
|
||||
// The value set is aliased with the input slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) setInt32Slice(v []int32) {
|
||||
*(*[]int32)(p.p) = v
|
||||
}
|
||||
|
||||
// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
|
||||
func (p pointer) appendInt32Slice(v int32) {
|
||||
s := (*[]int32)(p.p)
|
||||
*s = append(*s, v)
|
||||
}
|
||||
|
||||
func (p pointer) toUint64() *uint64 {
|
||||
return (*uint64)(p.p)
|
||||
}
|
||||
func (p pointer) toUint64Ptr() **uint64 {
|
||||
return (**uint64)(p.p)
|
||||
}
|
||||
func (p pointer) toUint64Slice() *[]uint64 {
|
||||
return (*[]uint64)(p.p)
|
||||
}
|
||||
func (p pointer) toUint32() *uint32 {
|
||||
return (*uint32)(p.p)
|
||||
}
|
||||
func (p pointer) toUint32Ptr() **uint32 {
|
||||
return (**uint32)(p.p)
|
||||
}
|
||||
func (p pointer) toUint32Slice() *[]uint32 {
|
||||
return (*[]uint32)(p.p)
|
||||
}
|
||||
func (p pointer) toBool() *bool {
|
||||
return (*bool)(p.p)
|
||||
}
|
||||
func (p pointer) toBoolPtr() **bool {
|
||||
return (**bool)(p.p)
|
||||
}
|
||||
func (p pointer) toBoolSlice() *[]bool {
|
||||
return (*[]bool)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat64() *float64 {
|
||||
return (*float64)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat64Ptr() **float64 {
|
||||
return (**float64)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat64Slice() *[]float64 {
|
||||
return (*[]float64)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat32() *float32 {
|
||||
return (*float32)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat32Ptr() **float32 {
|
||||
return (**float32)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat32Slice() *[]float32 {
|
||||
return (*[]float32)(p.p)
|
||||
}
|
||||
func (p pointer) toString() *string {
|
||||
return (*string)(p.p)
|
||||
}
|
||||
func (p pointer) toStringPtr() **string {
|
||||
return (**string)(p.p)
|
||||
}
|
||||
func (p pointer) toStringSlice() *[]string {
|
||||
return (*[]string)(p.p)
|
||||
}
|
||||
func (p pointer) toBytes() *[]byte {
|
||||
return (*[]byte)(p.p)
|
||||
}
|
||||
func (p pointer) toBytesSlice() *[][]byte {
|
||||
return (*[][]byte)(p.p)
|
||||
}
|
||||
func (p pointer) toExtensions() *XXX_InternalExtensions {
|
||||
return (*XXX_InternalExtensions)(p.p)
|
||||
}
|
||||
func (p pointer) toOldExtensions() *map[int32]Extension {
|
||||
return (*map[int32]Extension)(p.p)
|
||||
}
|
||||
|
||||
// getPointerSlice loads []*T from p as a []pointer.
|
||||
// The value returned is aliased with the original slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) getPointerSlice() []pointer {
|
||||
// Super-tricky - p should point to a []*T where T is a
|
||||
// message type. We load it as []pointer.
|
||||
return *(*[]pointer)(p.p)
|
||||
}
|
||||
|
||||
// setPointerSlice stores []pointer into p as a []*T.
|
||||
// The value set is aliased with the input slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) setPointerSlice(v []pointer) {
|
||||
// Super-tricky - p should point to a []*T where T is a
|
||||
// message type. We store it as []pointer.
|
||||
*(*[]pointer)(p.p) = v
|
||||
}
|
||||
|
||||
// getPointer loads the pointer at p and returns it.
|
||||
func (p pointer) getPointer() pointer {
|
||||
return pointer{p: *(*unsafe.Pointer)(p.p)}
|
||||
}
|
||||
|
||||
// setPointer stores the pointer q at p.
|
||||
func (p pointer) setPointer(q pointer) {
|
||||
*(*unsafe.Pointer)(p.p) = q.p
|
||||
}
|
||||
|
||||
// append q to the slice pointed to by p.
|
||||
func (p pointer) appendPointer(q pointer) {
|
||||
s := (*[]unsafe.Pointer)(p.p)
|
||||
*s = append(*s, q.p)
|
||||
}
|
||||
|
||||
// getInterfacePointer returns a pointer that points to the
|
||||
// interface data of the interface pointed by p.
|
||||
func (p pointer) getInterfacePointer() pointer {
|
||||
// Super-tricky - read pointer out of data word of interface value.
|
||||
return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
|
||||
}
|
||||
|
||||
// asPointerTo returns a reflect.Value that is a pointer to an
|
||||
// object of type t stored at p.
|
||||
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
|
||||
return reflect.NewAt(t, p.p)
|
||||
}
|
||||
|
||||
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
	return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
	return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
	return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
	return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
@ -0,0 +1,544 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Routines for encoding data into the wire format for protocol buffers.
|
||||
*/
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const debug bool = false
|
||||
|
||||
// Constants that identify the encoding of a value on the wire.
|
||||
const (
|
||||
WireVarint = 0
|
||||
WireFixed64 = 1
|
||||
WireBytes = 2
|
||||
WireStartGroup = 3
|
||||
WireEndGroup = 4
|
||||
WireFixed32 = 5
|
||||
)
|
||||
|
||||
// tagMap is an optimization over map[int]int for typical protocol buffer
// use-cases. Encoded protocol buffers are often in tag order with small tag
// numbers.
type tagMap struct {
	fastTags []int
	slowTags map[int]int
}

// tagMapFastLimit is the upper bound on the tag number that will be stored in
// the tagMap slice rather than its map.
const tagMapFastLimit = 1024

func (p *tagMap) get(t int) (int, bool) {
	if t > 0 && t < tagMapFastLimit {
		if t >= len(p.fastTags) {
			return 0, false
		}
		fi := p.fastTags[t]
		return fi, fi >= 0
	}
	fi, ok := p.slowTags[t]
	return fi, ok
}

func (p *tagMap) put(t int, fi int) {
	if t > 0 && t < tagMapFastLimit {
		for len(p.fastTags) < t+1 {
			p.fastTags = append(p.fastTags, -1)
		}
		p.fastTags[t] = fi
		return
	}
	if p.slowTags == nil {
		p.slowTags = make(map[int]int)
	}
	p.slowTags[t] = fi
}
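// Illustrative sketch (editor's addition, not part of the upstream source):
// how the fast path and the map fallback divide the tag space, assuming only
// the get and put methods defined above.
//
//	var tm tagMap
//	tm.put(3, 0)        // tag 3 < tagMapFastLimit: stored in the fastTags slice
//	tm.put(100000, 1)   // large tag: falls back to the slowTags map
//	fi, ok := tm.get(3)      // fi == 0, ok == true
//	_, ok = tm.get(4)        // ok == false; tag 4 was never stored
//	fi, ok = tm.get(100000)  // fi == 1, ok == true (map lookup)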
|
||||
|
||||
// StructProperties represents properties for all the fields of a struct.
|
||||
// decoderTags and decoderOrigNames should only be used by the decoder.
|
||||
type StructProperties struct {
|
||||
Prop []*Properties // properties for each field
|
||||
reqCount int // required count
|
||||
decoderTags tagMap // map from proto tag to struct field number
|
||||
decoderOrigNames map[string]int // map from original name to struct field number
|
||||
order []int // list of struct field numbers in tag order
|
||||
|
||||
// OneofTypes contains information about the oneof fields in this message.
|
||||
// It is keyed by the original name of a field.
|
||||
OneofTypes map[string]*OneofProperties
|
||||
}
|
||||
|
||||
// OneofProperties represents information about a specific field in a oneof.
|
||||
type OneofProperties struct {
|
||||
Type reflect.Type // pointer to generated struct type for this oneof field
|
||||
Field int // struct field number of the containing oneof in the message
|
||||
Prop *Properties
|
||||
}
|
||||
|
||||
// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
|
||||
// See encode.go, (*Buffer).enc_struct.
|
||||
|
||||
func (sp *StructProperties) Len() int { return len(sp.order) }
|
||||
func (sp *StructProperties) Less(i, j int) bool {
|
||||
return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
|
||||
}
|
||||
func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
|
||||
|
||||
// Properties represents the protocol-specific behavior of a single struct field.
|
||||
type Properties struct {
|
||||
Name string // name of the field, for error messages
|
||||
OrigName string // original name before protocol compiler (always set)
|
||||
JSONName string // name to use for JSON; determined by protoc
|
||||
Wire string
|
||||
WireType int
|
||||
Tag int
|
||||
Required bool
|
||||
Optional bool
|
||||
Repeated bool
|
||||
Packed bool // relevant for repeated primitives only
|
||||
Enum string // set for enum types only
|
||||
proto3 bool // whether this is known to be a proto3 field
|
||||
oneof bool // whether this is a oneof field
|
||||
|
||||
Default string // default value
|
||||
HasDefault bool // whether an explicit default was provided
|
||||
|
||||
stype reflect.Type // set for struct types only
|
||||
sprop *StructProperties // set for struct types only
|
||||
|
||||
mtype reflect.Type // set for map types only
|
||||
MapKeyProp *Properties // set for map types only
|
||||
MapValProp *Properties // set for map types only
|
||||
}
|
||||
|
||||
// String formats the properties in the protobuf struct field tag style.
|
||||
func (p *Properties) String() string {
|
||||
s := p.Wire
|
||||
s += ","
|
||||
s += strconv.Itoa(p.Tag)
|
||||
if p.Required {
|
||||
s += ",req"
|
||||
}
|
||||
if p.Optional {
|
||||
s += ",opt"
|
||||
}
|
||||
if p.Repeated {
|
||||
s += ",rep"
|
||||
}
|
||||
if p.Packed {
|
||||
s += ",packed"
|
||||
}
|
||||
s += ",name=" + p.OrigName
|
||||
if p.JSONName != p.OrigName {
|
||||
s += ",json=" + p.JSONName
|
||||
}
|
||||
if p.proto3 {
|
||||
s += ",proto3"
|
||||
}
|
||||
if p.oneof {
|
||||
s += ",oneof"
|
||||
}
|
||||
if len(p.Enum) > 0 {
|
||||
s += ",enum=" + p.Enum
|
||||
}
|
||||
if p.HasDefault {
|
||||
s += ",def=" + p.Default
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Parse populates p by parsing a string in the protobuf struct field tag style.
|
||||
func (p *Properties) Parse(s string) {
|
||||
// "bytes,49,opt,name=foo,def=hello!"
|
||||
fields := strings.Split(s, ",") // breaks def=, but handled below.
|
||||
if len(fields) < 2 {
|
||||
log.Printf("proto: tag has too few fields: %q", s)
|
||||
return
|
||||
}
|
||||
|
||||
p.Wire = fields[0]
|
||||
switch p.Wire {
|
||||
case "varint":
|
||||
p.WireType = WireVarint
|
||||
case "fixed32":
|
||||
p.WireType = WireFixed32
|
||||
case "fixed64":
|
||||
p.WireType = WireFixed64
|
||||
case "zigzag32":
|
||||
p.WireType = WireVarint
|
||||
case "zigzag64":
|
||||
p.WireType = WireVarint
|
||||
case "bytes", "group":
|
||||
p.WireType = WireBytes
|
||||
// no numeric converter for non-numeric types
|
||||
default:
|
||||
log.Printf("proto: tag has unknown wire type: %q", s)
|
||||
return
|
||||
}
|
||||
|
||||
var err error
|
||||
p.Tag, err = strconv.Atoi(fields[1])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
outer:
|
||||
for i := 2; i < len(fields); i++ {
|
||||
f := fields[i]
|
||||
switch {
|
||||
case f == "req":
|
||||
p.Required = true
|
||||
case f == "opt":
|
||||
p.Optional = true
|
||||
case f == "rep":
|
||||
p.Repeated = true
|
||||
case f == "packed":
|
||||
p.Packed = true
|
||||
case strings.HasPrefix(f, "name="):
|
||||
p.OrigName = f[5:]
|
||||
case strings.HasPrefix(f, "json="):
|
||||
p.JSONName = f[5:]
|
||||
case strings.HasPrefix(f, "enum="):
|
||||
p.Enum = f[5:]
|
||||
case f == "proto3":
|
||||
p.proto3 = true
|
||||
case f == "oneof":
|
||||
p.oneof = true
|
||||
case strings.HasPrefix(f, "def="):
|
||||
p.HasDefault = true
|
||||
p.Default = f[4:] // rest of string
|
||||
if i+1 < len(fields) {
|
||||
// Commas aren't escaped, and def is always last.
|
||||
p.Default += "," + strings.Join(fields[i+1:], ",")
|
||||
break outer
|
||||
}
|
||||
}
|
||||
}
|
||||
}
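// Illustrative sketch (editor's addition): what Parse extracts from a typical
// generated struct tag, assuming only the Properties fields documented above.
//
//	var p Properties
//	p.Parse("bytes,49,opt,name=foo,def=hello!")
//	// p.Wire == "bytes", p.WireType == WireBytes, p.Tag == 49,
//	// p.Optional == true, p.OrigName == "foo",
//	// p.HasDefault == true, p.Default == "hello!"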
|
||||
|
||||
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
|
||||
|
||||
// setFieldProps initializes the field properties for submessages and maps.
|
||||
func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
|
||||
switch t1 := typ; t1.Kind() {
|
||||
case reflect.Ptr:
|
||||
if t1.Elem().Kind() == reflect.Struct {
|
||||
p.stype = t1.Elem()
|
||||
}
|
||||
|
||||
case reflect.Slice:
|
||||
if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
|
||||
p.stype = t2.Elem()
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
p.mtype = t1
|
||||
p.MapKeyProp = &Properties{}
|
||||
p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
|
||||
p.MapValProp = &Properties{}
|
||||
vtype := p.mtype.Elem()
|
||||
if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
|
||||
// The value type is not a message (*T) or bytes ([]byte),
|
||||
// so we need encoders for the pointer to this type.
|
||||
vtype = reflect.PtrTo(vtype)
|
||||
}
|
||||
p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
|
||||
}
|
||||
|
||||
if p.stype != nil {
|
||||
if lockGetProp {
|
||||
p.sprop = GetProperties(p.stype)
|
||||
} else {
|
||||
p.sprop = getPropertiesLocked(p.stype)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
|
||||
)
|
||||
|
||||
// Init populates the properties from a protocol buffer struct tag.
|
||||
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
|
||||
p.init(typ, name, tag, f, true)
|
||||
}
|
||||
|
||||
func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
|
||||
// "bytes,49,opt,def=hello!"
|
||||
p.Name = name
|
||||
p.OrigName = name
|
||||
if tag == "" {
|
||||
return
|
||||
}
|
||||
p.Parse(tag)
|
||||
p.setFieldProps(typ, f, lockGetProp)
|
||||
}
|
||||
|
||||
var (
|
||||
propertiesMu sync.RWMutex
|
||||
propertiesMap = make(map[reflect.Type]*StructProperties)
|
||||
)
|
||||
|
||||
// GetProperties returns the list of properties for the type represented by t.
|
||||
// t must represent a generated struct type of a protocol message.
|
||||
func GetProperties(t reflect.Type) *StructProperties {
|
||||
if t.Kind() != reflect.Struct {
|
||||
panic("proto: type must have kind struct")
|
||||
}
|
||||
|
||||
// Most calls to GetProperties in a long-running program will be
|
||||
// retrieving details for types we have seen before.
|
||||
propertiesMu.RLock()
|
||||
sprop, ok := propertiesMap[t]
|
||||
propertiesMu.RUnlock()
|
||||
if ok {
|
||||
return sprop
|
||||
}
|
||||
|
||||
propertiesMu.Lock()
|
||||
sprop = getPropertiesLocked(t)
|
||||
propertiesMu.Unlock()
|
||||
return sprop
|
||||
}
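// Hedged usage sketch (editor's addition): GetProperties is keyed by the
// struct type itself, so callers pass the element type of a message pointer.
// pb.MyMessage is a placeholder for any generated message type.
//
//	t := reflect.TypeOf(&pb.MyMessage{}).Elem()
//	sprops := GetProperties(t)
//	for _, fieldProp := range sprops.Prop {
//		_ = fieldProp.OrigName // per-field metadata parsed from the struct tags
//	}
//
// The read-lock fast path above makes repeated lookups for already-seen types
// cheap; only the first request for a given type takes the write lock.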
|
||||
|
||||
type (
|
||||
oneofFuncsIface interface {
|
||||
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
|
||||
}
|
||||
oneofWrappersIface interface {
|
||||
XXX_OneofWrappers() []interface{}
|
||||
}
|
||||
)
|
||||
|
||||
// getPropertiesLocked requires that propertiesMu is held.
|
||||
func getPropertiesLocked(t reflect.Type) *StructProperties {
|
||||
if prop, ok := propertiesMap[t]; ok {
|
||||
return prop
|
||||
}
|
||||
|
||||
prop := new(StructProperties)
|
||||
// in case of recursive protos, fill this in now.
|
||||
propertiesMap[t] = prop
|
||||
|
||||
// build properties
|
||||
prop.Prop = make([]*Properties, t.NumField())
|
||||
prop.order = make([]int, t.NumField())
|
||||
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
p := new(Properties)
|
||||
name := f.Name
|
||||
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
|
||||
|
||||
oneof := f.Tag.Get("protobuf_oneof") // special case
|
||||
if oneof != "" {
|
||||
// Oneof fields don't use the traditional protobuf tag.
|
||||
p.OrigName = oneof
|
||||
}
|
||||
prop.Prop[i] = p
|
||||
prop.order[i] = i
|
||||
if debug {
|
||||
print(i, " ", f.Name, " ", t.String(), " ")
|
||||
if p.Tag > 0 {
|
||||
print(p.String())
|
||||
}
|
||||
print("\n")
|
||||
}
|
||||
}
|
||||
|
||||
// Re-order prop.order.
|
||||
sort.Sort(prop)
|
||||
|
||||
var oots []interface{}
|
||||
switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
|
||||
case oneofFuncsIface:
|
||||
_, _, _, oots = m.XXX_OneofFuncs()
|
||||
case oneofWrappersIface:
|
||||
oots = m.XXX_OneofWrappers()
|
||||
}
|
||||
if len(oots) > 0 {
|
||||
// Interpret oneof metadata.
|
||||
prop.OneofTypes = make(map[string]*OneofProperties)
|
||||
for _, oot := range oots {
|
||||
oop := &OneofProperties{
|
||||
Type: reflect.ValueOf(oot).Type(), // *T
|
||||
Prop: new(Properties),
|
||||
}
|
||||
sft := oop.Type.Elem().Field(0)
|
||||
oop.Prop.Name = sft.Name
|
||||
oop.Prop.Parse(sft.Tag.Get("protobuf"))
|
||||
// There will be exactly one interface field that
|
||||
// this new value is assignable to.
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
if f.Type.Kind() != reflect.Interface {
|
||||
continue
|
||||
}
|
||||
if !oop.Type.AssignableTo(f.Type) {
|
||||
continue
|
||||
}
|
||||
oop.Field = i
|
||||
break
|
||||
}
|
||||
prop.OneofTypes[oop.Prop.OrigName] = oop
|
||||
}
|
||||
}
|
||||
|
||||
// build required counts
|
||||
// build tags
|
||||
reqCount := 0
|
||||
prop.decoderOrigNames = make(map[string]int)
|
||||
for i, p := range prop.Prop {
|
||||
if strings.HasPrefix(p.Name, "XXX_") {
|
||||
// Internal fields should not appear in tags/origNames maps.
|
||||
// They are handled specially when encoding and decoding.
|
||||
continue
|
||||
}
|
||||
if p.Required {
|
||||
reqCount++
|
||||
}
|
||||
prop.decoderTags.put(p.Tag, i)
|
||||
prop.decoderOrigNames[p.OrigName] = i
|
||||
}
|
||||
prop.reqCount = reqCount
|
||||
|
||||
return prop
|
||||
}
|
||||
|
||||
// A global registry of enum types.
|
||||
// The generated code will register the generated maps by calling RegisterEnum.
|
||||
|
||||
var enumValueMaps = make(map[string]map[string]int32)
|
||||
|
||||
// RegisterEnum is called from the generated code to install the enum descriptor
|
||||
// maps into the global table to aid parsing text format protocol buffers.
|
||||
func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
|
||||
if _, ok := enumValueMaps[typeName]; ok {
|
||||
panic("proto: duplicate enum registered: " + typeName)
|
||||
}
|
||||
enumValueMaps[typeName] = valueMap
|
||||
}
|
||||
|
||||
// EnumValueMap returns the mapping from names to integers of the
|
||||
// enum type enumType, or nil if not found.
|
||||
func EnumValueMap(enumType string) map[string]int32 {
|
||||
return enumValueMaps[enumType]
|
||||
}
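// Hedged usage sketch (editor's addition): generated code registers enums at
// init time; EnumValueMap is the read side used when parsing the text format.
// "example.Color" and its values are placeholders, not a real registered enum.
//
//	func init() {
//		RegisterEnum("example.Color", nil, map[string]int32{"RED": 0, "BLUE": 1})
//	}
//	// Later: EnumValueMap("example.Color")["BLUE"] == 1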
|
||||
|
||||
// A registry of all linked message types.
|
||||
// The string is a fully-qualified proto name ("pkg.Message").
|
||||
var (
|
||||
protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
|
||||
protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
|
||||
revProtoTypes = make(map[reflect.Type]string)
|
||||
)
|
||||
|
||||
// RegisterType is called from generated code and maps from the fully qualified
|
||||
// proto name to the type (pointer to struct) of the protocol buffer.
|
||||
func RegisterType(x Message, name string) {
|
||||
if _, ok := protoTypedNils[name]; ok {
|
||||
// TODO: Some day, make this a panic.
|
||||
log.Printf("proto: duplicate proto type registered: %s", name)
|
||||
return
|
||||
}
|
||||
t := reflect.TypeOf(x)
|
||||
if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
|
||||
// Generated code always calls RegisterType with nil x.
|
||||
// This check is just for extra safety.
|
||||
protoTypedNils[name] = x
|
||||
} else {
|
||||
protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
|
||||
}
|
||||
revProtoTypes[t] = name
|
||||
}
|
||||
|
||||
// RegisterMapType is called from generated code and maps from the fully qualified
|
||||
// proto name to the native map type of the proto map definition.
|
||||
func RegisterMapType(x interface{}, name string) {
|
||||
if reflect.TypeOf(x).Kind() != reflect.Map {
|
||||
panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
|
||||
}
|
||||
if _, ok := protoMapTypes[name]; ok {
|
||||
log.Printf("proto: duplicate proto type registered: %s", name)
|
||||
return
|
||||
}
|
||||
t := reflect.TypeOf(x)
|
||||
protoMapTypes[name] = t
|
||||
revProtoTypes[t] = name
|
||||
}
|
||||
|
||||
// MessageName returns the fully-qualified proto name for the given message type.
|
||||
func MessageName(x Message) string {
|
||||
type xname interface {
|
||||
XXX_MessageName() string
|
||||
}
|
||||
if m, ok := x.(xname); ok {
|
||||
return m.XXX_MessageName()
|
||||
}
|
||||
return revProtoTypes[reflect.TypeOf(x)]
|
||||
}
|
||||
|
||||
// MessageType returns the message type (pointer to struct) for a named message.
|
||||
// The type is not guaranteed to implement proto.Message if the name refers to a
|
||||
// map entry.
|
||||
func MessageType(name string) reflect.Type {
|
||||
if t, ok := protoTypedNils[name]; ok {
|
||||
return reflect.TypeOf(t)
|
||||
}
|
||||
return protoMapTypes[name]
|
||||
}
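// Hedged usage sketch (editor's addition): the two registries above allow a
// round trip between a message value and its fully-qualified name.
// pb.MyMessage and "example.MyMessage" are placeholders.
//
//	name := MessageName(&pb.MyMessage{}) // e.g. "example.MyMessage"
//	typ := MessageType(name)             // reflect.Type for *pb.MyMessage
//	msg := reflect.New(typ.Elem()).Interface().(Message)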
|
||||
|
||||
// A registry of all linked proto files.
|
||||
var (
|
||||
protoFiles = make(map[string][]byte) // file name => fileDescriptor
|
||||
)
|
||||
|
||||
// RegisterFile is called from generated code and maps from the
|
||||
// full file name of a .proto file to its compressed FileDescriptorProto.
|
||||
func RegisterFile(filename string, fileDescriptor []byte) {
|
||||
protoFiles[filename] = fileDescriptor
|
||||
}
|
||||
|
||||
// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
|
||||
func FileDescriptor(filename string) []byte { return protoFiles[filename] }
|
@ -0,0 +1,654 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// Merge merges the src message into dst.
|
||||
// This assumes that dst and src are of the same type and are non-nil.
|
||||
func (a *InternalMessageInfo) Merge(dst, src Message) {
|
||||
mi := atomicLoadMergeInfo(&a.merge)
|
||||
if mi == nil {
|
||||
mi = getMergeInfo(reflect.TypeOf(dst).Elem())
|
||||
atomicStoreMergeInfo(&a.merge, mi)
|
||||
}
|
||||
mi.merge(toPointer(&dst), toPointer(&src))
|
||||
}
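// Note (editor's addition): the nil check plus atomicStoreMergeInfo above is a
// lazily-initialized, lock-free cache. Concurrent callers may race to compute
// the same *mergeInfo, but each result is equivalent, so the last store wins
// harmlessly; subsequent calls take the atomic-load fast path.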
|
||||
|
||||
type mergeInfo struct {
|
||||
typ reflect.Type
|
||||
|
||||
initialized int32 // 0: only typ is valid, 1: everything is valid
|
||||
lock sync.Mutex
|
||||
|
||||
fields []mergeFieldInfo
|
||||
unrecognized field // Offset of XXX_unrecognized
|
||||
}
|
||||
|
||||
type mergeFieldInfo struct {
|
||||
field field // Offset of field, guaranteed to be valid
|
||||
|
||||
// isPointer reports whether the value in the field is a pointer.
|
||||
// This is true for the following situations:
|
||||
// * Pointer to struct
|
||||
// * Pointer to basic type (proto2 only)
|
||||
// * Slice (first value in slice header is a pointer)
|
||||
// * String (first value in string header is a pointer)
|
||||
isPointer bool
|
||||
|
||||
// basicWidth reports the width of the field assuming that it is directly
|
||||
// embedded in the struct (as is the case for basic types in proto3).
|
||||
// The possible values are:
|
||||
// 0: invalid
|
||||
// 1: bool
|
||||
// 4: int32, uint32, float32
|
||||
// 8: int64, uint64, float64
|
||||
basicWidth int
|
||||
|
||||
// merge merges src into dst, where dst and src are pointers to the field being merged.
|
||||
merge func(dst, src pointer)
|
||||
}
|
||||
|
||||
var (
|
||||
mergeInfoMap = map[reflect.Type]*mergeInfo{}
|
||||
mergeInfoLock sync.Mutex
|
||||
)
|
||||
|
||||
func getMergeInfo(t reflect.Type) *mergeInfo {
|
||||
mergeInfoLock.Lock()
|
||||
defer mergeInfoLock.Unlock()
|
||||
mi := mergeInfoMap[t]
|
||||
if mi == nil {
|
||||
mi = &mergeInfo{typ: t}
|
||||
mergeInfoMap[t] = mi
|
||||
}
|
||||
return mi
|
||||
}
|
||||
|
||||
// merge merges src into dst assuming they are both of type *mi.typ.
|
||||
func (mi *mergeInfo) merge(dst, src pointer) {
|
||||
if dst.isNil() {
|
||||
panic("proto: nil destination")
|
||||
}
|
||||
if src.isNil() {
|
||||
return // Nothing to do.
|
||||
}
|
||||
|
||||
if atomic.LoadInt32(&mi.initialized) == 0 {
|
||||
mi.computeMergeInfo()
|
||||
}
|
||||
|
||||
for _, fi := range mi.fields {
|
||||
sfp := src.offset(fi.field)
|
||||
|
||||
// As an optimization, we can avoid the merge function call cost
|
||||
// if we know for sure that the source will have no effect
|
||||
// by checking if it is the zero value.
|
||||
if unsafeAllowed {
|
||||
if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
|
||||
continue
|
||||
}
|
||||
if fi.basicWidth > 0 {
|
||||
switch {
|
||||
case fi.basicWidth == 1 && !*sfp.toBool():
|
||||
continue
|
||||
case fi.basicWidth == 4 && *sfp.toUint32() == 0:
|
||||
continue
|
||||
case fi.basicWidth == 8 && *sfp.toUint64() == 0:
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dfp := dst.offset(fi.field)
|
||||
fi.merge(dfp, sfp)
|
||||
}
|
||||
|
||||
// TODO: Make this faster?
|
||||
out := dst.asPointerTo(mi.typ).Elem()
|
||||
in := src.asPointerTo(mi.typ).Elem()
|
||||
if emIn, err := extendable(in.Addr().Interface()); err == nil {
|
||||
emOut, _ := extendable(out.Addr().Interface())
|
||||
mIn, muIn := emIn.extensionsRead()
|
||||
if mIn != nil {
|
||||
mOut := emOut.extensionsWrite()
|
||||
muIn.Lock()
|
||||
mergeExtension(mOut, mIn)
|
||||
muIn.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
if mi.unrecognized.IsValid() {
|
||||
if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
|
||||
*dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (mi *mergeInfo) computeMergeInfo() {
|
||||
mi.lock.Lock()
|
||||
defer mi.lock.Unlock()
|
||||
if mi.initialized != 0 {
|
||||
return
|
||||
}
|
||||
t := mi.typ
|
||||
n := t.NumField()
|
||||
|
||||
props := GetProperties(t)
|
||||
for i := 0; i < n; i++ {
|
||||
f := t.Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
|
||||
mfi := mergeFieldInfo{field: toField(&f)}
|
||||
tf := f.Type
|
||||
|
||||
// As an optimization, we can avoid the merge function call cost
|
||||
// if we know for sure that the source will have no effect
|
||||
// by checking if it is the zero value.
|
||||
if unsafeAllowed {
|
||||
switch tf.Kind() {
|
||||
case reflect.Ptr, reflect.Slice, reflect.String:
|
||||
// As a special case, we assume slices and strings are pointers
|
||||
// since we know that the first field in the SliceHeader or
|
||||
// StringHeader is a data pointer.
|
||||
mfi.isPointer = true
|
||||
case reflect.Bool:
|
||||
mfi.basicWidth = 1
|
||||
case reflect.Int32, reflect.Uint32, reflect.Float32:
|
||||
mfi.basicWidth = 4
|
||||
case reflect.Int64, reflect.Uint64, reflect.Float64:
|
||||
mfi.basicWidth = 8
|
||||
}
|
||||
}
|
||||
|
||||
// Unwrap tf to get at its most basic type.
|
||||
var isPointer, isSlice bool
|
||||
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
|
||||
isSlice = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if tf.Kind() == reflect.Ptr {
|
||||
isPointer = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if isPointer && isSlice && tf.Kind() != reflect.Struct {
|
||||
panic("both pointer and slice for basic type in " + tf.Name())
|
||||
}
|
||||
|
||||
switch tf.Kind() {
|
||||
case reflect.Int32:
|
||||
switch {
|
||||
case isSlice: // E.g., []int32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
// NOTE: toInt32Slice is not defined (see pointer_reflect.go).
|
||||
/*
|
||||
sfsp := src.toInt32Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toInt32Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []int64{}
|
||||
}
|
||||
}
|
||||
*/
|
||||
sfs := src.getInt32Slice()
|
||||
if sfs != nil {
|
||||
dfs := dst.getInt32Slice()
|
||||
dfs = append(dfs, sfs...)
|
||||
if dfs == nil {
|
||||
dfs = []int32{}
|
||||
}
|
||||
dst.setInt32Slice(dfs)
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *int32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
// NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
|
||||
/*
|
||||
sfpp := src.toInt32Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toInt32Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Int32(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
*/
|
||||
sfp := src.getInt32Ptr()
|
||||
if sfp != nil {
|
||||
dfp := dst.getInt32Ptr()
|
||||
if dfp == nil {
|
||||
dst.setInt32Ptr(*sfp)
|
||||
} else {
|
||||
*dfp = *sfp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., int32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toInt32(); v != 0 {
|
||||
*dst.toInt32() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Int64:
|
||||
switch {
|
||||
case isSlice: // E.g., []int64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toInt64Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toInt64Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []int64{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *int64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toInt64Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toInt64Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Int64(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., int64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toInt64(); v != 0 {
|
||||
*dst.toInt64() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Uint32:
|
||||
switch {
|
||||
case isSlice: // E.g., []uint32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toUint32Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toUint32Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []uint32{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *uint32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toUint32Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toUint32Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Uint32(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., uint32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toUint32(); v != 0 {
|
||||
*dst.toUint32() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Uint64:
|
||||
switch {
|
||||
case isSlice: // E.g., []uint64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toUint64Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toUint64Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []uint64{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *uint64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toUint64Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toUint64Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Uint64(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., uint64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toUint64(); v != 0 {
|
||||
*dst.toUint64() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Float32:
|
||||
switch {
|
||||
case isSlice: // E.g., []float32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toFloat32Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toFloat32Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []float32{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *float32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toFloat32Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toFloat32Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Float32(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., float32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toFloat32(); v != 0 {
|
||||
*dst.toFloat32() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Float64:
|
||||
switch {
|
||||
case isSlice: // E.g., []float64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toFloat64Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toFloat64Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []float64{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *float64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toFloat64Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toFloat64Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Float64(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., float64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toFloat64(); v != 0 {
|
||||
*dst.toFloat64() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Bool:
|
||||
switch {
|
||||
case isSlice: // E.g., []bool
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toBoolSlice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toBoolSlice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []bool{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *bool
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toBoolPtr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toBoolPtr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Bool(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., bool
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toBool(); v {
|
||||
*dst.toBool() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.String:
|
||||
switch {
|
||||
case isSlice: // E.g., []string
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toStringSlice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toStringSlice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []string{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *string
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toStringPtr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toStringPtr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = String(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., string
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toString(); v != "" {
|
||||
*dst.toString() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
isProto3 := props.Prop[i].proto3
|
||||
switch {
|
||||
case isPointer:
|
||||
panic("bad pointer in byte slice case in " + tf.Name())
|
||||
case tf.Elem().Kind() != reflect.Uint8:
|
||||
panic("bad element kind in byte slice case in " + tf.Name())
|
||||
case isSlice: // E.g., [][]byte
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sbsp := src.toBytesSlice()
|
||||
if *sbsp != nil {
|
||||
dbsp := dst.toBytesSlice()
|
||||
for _, sb := range *sbsp {
|
||||
if sb == nil {
|
||||
*dbsp = append(*dbsp, nil)
|
||||
} else {
|
||||
*dbsp = append(*dbsp, append([]byte{}, sb...))
|
||||
}
|
||||
}
|
||||
if *dbsp == nil {
|
||||
*dbsp = [][]byte{}
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., []byte
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sbp := src.toBytes()
|
||||
if *sbp != nil {
|
||||
dbp := dst.toBytes()
|
||||
if !isProto3 || len(*sbp) > 0 {
|
||||
*dbp = append([]byte{}, *sbp...)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Struct:
|
||||
switch {
|
||||
case !isPointer:
|
||||
panic(fmt.Sprintf("message field %s without pointer", tf))
|
||||
case isSlice: // E.g., []*pb.T
|
||||
mi := getMergeInfo(tf)
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sps := src.getPointerSlice()
|
||||
if sps != nil {
|
||||
dps := dst.getPointerSlice()
|
||||
for _, sp := range sps {
|
||||
var dp pointer
|
||||
if !sp.isNil() {
|
||||
dp = valToPointer(reflect.New(tf))
|
||||
mi.merge(dp, sp)
|
||||
}
|
||||
dps = append(dps, dp)
|
||||
}
|
||||
if dps == nil {
|
||||
dps = []pointer{}
|
||||
}
|
||||
dst.setPointerSlice(dps)
|
||||
}
|
||||
}
|
||||
default: // E.g., *pb.T
|
||||
mi := getMergeInfo(tf)
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sp := src.getPointer()
|
||||
if !sp.isNil() {
|
||||
dp := dst.getPointer()
|
||||
if dp.isNil() {
|
||||
dp = valToPointer(reflect.New(tf))
|
||||
dst.setPointer(dp)
|
||||
}
|
||||
mi.merge(dp, sp)
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Map:
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic("bad pointer or slice in map case in " + tf.Name())
|
||||
default: // E.g., map[K]V
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sm := src.asPointerTo(tf).Elem()
|
||||
if sm.Len() == 0 {
|
||||
return
|
||||
}
|
||||
dm := dst.asPointerTo(tf).Elem()
|
||||
if dm.IsNil() {
|
||||
dm.Set(reflect.MakeMap(tf))
|
||||
}
|
||||
|
||||
switch tf.Elem().Kind() {
|
||||
case reflect.Ptr: // Proto struct (e.g., *T)
|
||||
for _, key := range sm.MapKeys() {
|
||||
val := sm.MapIndex(key)
|
||||
val = reflect.ValueOf(Clone(val.Interface().(Message)))
|
||||
dm.SetMapIndex(key, val)
|
||||
}
|
||||
case reflect.Slice: // E.g. Bytes type (e.g., []byte)
|
||||
for _, key := range sm.MapKeys() {
|
||||
val := sm.MapIndex(key)
|
||||
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
|
||||
dm.SetMapIndex(key, val)
|
||||
}
|
||||
default: // Basic type (e.g., string)
|
||||
for _, key := range sm.MapKeys() {
|
||||
val := sm.MapIndex(key)
|
||||
dm.SetMapIndex(key, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Interface:
|
||||
// Must be oneof field.
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic("bad pointer or slice in interface case in " + tf.Name())
|
||||
default: // E.g., interface{}
|
||||
// TODO: Make this faster?
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
su := src.asPointerTo(tf).Elem()
|
||||
if !su.IsNil() {
|
||||
du := dst.asPointerTo(tf).Elem()
|
||||
typ := su.Elem().Type()
|
||||
if du.IsNil() || du.Elem().Type() != typ {
|
||||
du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
|
||||
}
|
||||
sv := su.Elem().Elem().Field(0)
|
||||
if sv.Kind() == reflect.Ptr && sv.IsNil() {
|
||||
return
|
||||
}
|
||||
dv := du.Elem().Elem().Field(0)
|
||||
if dv.Kind() == reflect.Ptr && dv.IsNil() {
|
||||
dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
|
||||
}
|
||||
switch sv.Type().Kind() {
|
||||
case reflect.Ptr: // Proto struct (e.g., *T)
|
||||
Merge(dv.Interface().(Message), sv.Interface().(Message))
|
||||
case reflect.Slice: // E.g. Bytes type (e.g., []byte)
|
||||
dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
|
||||
default: // Basic type (e.g., string)
|
||||
dv.Set(sv)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("merger not found for type:%s", tf))
|
||||
}
|
||||
mi.fields = append(mi.fields, mfi)
|
||||
}
|
||||
|
||||
mi.unrecognized = invalidField
|
||||
if f, ok := t.FieldByName("XXX_unrecognized"); ok {
|
||||
if f.Type != reflect.TypeOf([]byte{}) {
|
||||
panic("expected XXX_unrecognized to be of type []byte")
|
||||
}
|
||||
mi.unrecognized = toField(&f)
|
||||
}
|
||||
|
||||
atomic.StoreInt32(&mi.initialized, 1)
|
||||
}
|
@ -0,0 +1,843 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
// Functions for writing the text protocol buffer format.
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"math"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
newline = []byte("\n")
|
||||
spaces = []byte(" ")
|
||||
endBraceNewline = []byte("}\n")
|
||||
backslashN = []byte{'\\', 'n'}
|
||||
backslashR = []byte{'\\', 'r'}
|
||||
backslashT = []byte{'\\', 't'}
|
||||
backslashDQ = []byte{'\\', '"'}
|
||||
backslashBS = []byte{'\\', '\\'}
|
||||
posInf = []byte("inf")
|
||||
negInf = []byte("-inf")
|
||||
nan = []byte("nan")
|
||||
)
|
||||
|
||||
type writer interface {
|
||||
io.Writer
|
||||
WriteByte(byte) error
|
||||
}
|
||||
|
||||
// textWriter is an io.Writer that tracks its indentation level.
|
||||
type textWriter struct {
|
||||
ind int
|
||||
complete bool // if the current position is a complete line
|
||||
compact bool // whether to write out as a one-liner
|
||||
w writer
|
||||
}
|
||||
|
||||
func (w *textWriter) WriteString(s string) (n int, err error) {
|
||||
if !strings.Contains(s, "\n") {
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
w.complete = false
|
||||
return io.WriteString(w.w, s)
|
||||
}
|
||||
// WriteString is typically called without newlines, so this
|
||||
// codepath and its copy are rare. We copy to avoid
|
||||
// duplicating all of Write's logic here.
|
||||
return w.Write([]byte(s))
|
||||
}
|
||||
|
||||
func (w *textWriter) Write(p []byte) (n int, err error) {
|
||||
newlines := bytes.Count(p, newline)
|
||||
if newlines == 0 {
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
n, err = w.w.Write(p)
|
||||
w.complete = false
|
||||
return n, err
|
||||
}
|
||||
|
||||
frags := bytes.SplitN(p, newline, newlines+1)
|
||||
if w.compact {
|
||||
for i, frag := range frags {
|
||||
if i > 0 {
|
||||
if err := w.w.WriteByte(' '); err != nil {
|
||||
return n, err
|
||||
}
|
||||
n++
|
||||
}
|
||||
nn, err := w.w.Write(frag)
|
||||
n += nn
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
for i, frag := range frags {
|
||||
if w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
nn, err := w.w.Write(frag)
|
||||
n += nn
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
if i+1 < len(frags) {
|
||||
if err := w.w.WriteByte('\n'); err != nil {
|
||||
return n, err
|
||||
}
|
||||
n++
|
||||
}
|
||||
}
|
||||
w.complete = len(frags[len(frags)-1]) == 0
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (w *textWriter) WriteByte(c byte) error {
|
||||
if w.compact && c == '\n' {
|
||||
c = ' '
|
||||
}
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
err := w.w.WriteByte(c)
|
||||
w.complete = c == '\n'
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *textWriter) indent() { w.ind++ }
|
||||
|
||||
func (w *textWriter) unindent() {
|
||||
if w.ind == 0 {
|
||||
log.Print("proto: textWriter unindented too far")
|
||||
return
|
||||
}
|
||||
w.ind--
|
||||
}
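// Hedged usage sketch (editor's addition): how the indentation state drives
// output. writeIndent (called above but not shown in this hunk) emits the
// per-level prefix before the first write of each new line; bufio and bytes
// are already imported by this file.
//
//	var buf bytes.Buffer
//	w := &textWriter{w: bufio.NewWriter(&buf), complete: true}
//	w.WriteString("outer: <\n")
//	w.indent()
//	w.WriteString("inner: 1\n") // prefixed with one level of indentation
//	w.unindent()
//	w.WriteString(">\n")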
|
||||
|
||||
func writeName(w *textWriter, props *Properties) error {
|
||||
if _, err := w.WriteString(props.OrigName); err != nil {
|
||||
return err
|
||||
}
|
||||
if props.Wire != "group" {
|
||||
return w.WriteByte(':')
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func requiresQuotes(u string) bool {
|
||||
// When the type URL contains any characters other than [0-9A-Za-z./_], it must be quoted.
|
||||
for _, ch := range u {
|
||||
switch {
|
||||
case ch == '.' || ch == '/' || ch == '_':
|
||||
continue
|
||||
case '0' <= ch && ch <= '9':
|
||||
continue
|
||||
case 'A' <= ch && ch <= 'Z':
|
||||
continue
|
||||
case 'a' <= ch && ch <= 'z':
|
||||
continue
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
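// Illustrative sketch (editor's addition): which type URLs the text writer
// quotes. The URLs below are placeholders.
//
//	requiresQuotes("type.googleapis.com/example.MyMessage")   // false: only [0-9A-Za-z./_]
//	requiresQuotes("type.example.com:8080/example.MyMessage") // true: ':' forces quoting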
|
||||
|
||||
// isAny reports whether sv is a google.protobuf.Any message
|
||||
func isAny(sv reflect.Value) bool {
|
||||
type wkt interface {
|
||||
XXX_WellKnownType() string
|
||||
}
|
||||
t, ok := sv.Addr().Interface().(wkt)
|
||||
return ok && t.XXX_WellKnownType() == "Any"
|
||||
}
|
||||
|
||||
// writeProto3Any writes an expanded google.protobuf.Any message.
|
||||
//
|
||||
// It returns (false, nil) if the sv value can't be unmarshaled (e.g. because
|
||||
// required messages are not linked in).
|
||||
//
|
||||
// It returns (true, error) when sv was written in expanded format or an error
|
||||
// was encountered.
|
||||
func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
|
||||
turl := sv.FieldByName("TypeUrl")
|
||||
val := sv.FieldByName("Value")
|
||||
if !turl.IsValid() || !val.IsValid() {
|
||||
return true, errors.New("proto: invalid google.protobuf.Any message")
|
||||
}
|
||||
|
||||
b, ok := val.Interface().([]byte)
|
||||
if !ok {
|
||||
return true, errors.New("proto: invalid google.protobuf.Any message")
|
||||
}
|
||||
|
||||
parts := strings.Split(turl.String(), "/")
|
||||
mt := MessageType(parts[len(parts)-1])
|
||||
if mt == nil {
|
||||
return false, nil
|
||||
}
|
||||
m := reflect.New(mt.Elem())
|
||||
if err := Unmarshal(b, m.Interface().(Message)); err != nil {
|
||||
return false, nil
|
||||
}
|
||||
w.Write([]byte("["))
|
||||
u := turl.String()
|
||||
if requiresQuotes(u) {
|
||||
writeString(w, u)
|
||||
} else {
|
||||
w.Write([]byte(u))
|
||||
}
|
||||
if w.compact {
|
||||
w.Write([]byte("]:<"))
|
||||
} else {
|
||||
w.Write([]byte("]: <\n"))
|
||||
w.ind++
|
||||
}
|
||||
if err := tm.writeStruct(w, m.Elem()); err != nil {
|
||||
return true, err
|
||||
}
|
||||
if w.compact {
|
||||
w.Write([]byte("> "))
|
||||
} else {
|
||||
w.ind--
|
||||
w.Write([]byte(">\n"))
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
||||
if tm.ExpandAny && isAny(sv) {
|
||||
if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
|
||||
return err
|
||||
}
|
||||
}
|
||||
st := sv.Type()
|
||||
sprops := GetProperties(st)
|
||||
for i := 0; i < sv.NumField(); i++ {
|
||||
fv := sv.Field(i)
|
||||
props := sprops.Prop[i]
|
||||
name := st.Field(i).Name
|
||||
|
||||
if name == "XXX_NoUnkeyedLiteral" {
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.HasPrefix(name, "XXX_") {
|
||||
// There are two XXX_ fields:
|
||||
// XXX_unrecognized []byte
|
||||
// XXX_extensions map[int32]proto.Extension
|
||||
// The first is handled here;
|
||||
// the second is handled at the bottom of this function.
|
||||
if name == "XXX_unrecognized" && !fv.IsNil() {
|
||||
if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if fv.Kind() == reflect.Ptr && fv.IsNil() {
|
||||
// Field not filled in. This could be an optional field or
|
||||
// a required field that wasn't filled in. Either way, there
|
||||
// isn't anything we can show for it.
|
||||
continue
|
||||
}
|
||||
if fv.Kind() == reflect.Slice && fv.IsNil() {
|
||||
// Repeated field that is empty, or a bytes field that is unused.
|
||||
continue
|
||||
}
|
||||
|
||||
if props.Repeated && fv.Kind() == reflect.Slice {
|
||||
// Repeated field.
|
||||
for j := 0; j < fv.Len(); j++ {
|
||||
if err := writeName(w, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
v := fv.Index(j)
|
||||
if v.Kind() == reflect.Ptr && v.IsNil() {
|
||||
// A nil message in a repeated field is not valid,
|
||||
// but we can handle that more gracefully than panicking.
|
||||
if _, err := w.Write([]byte("<nil>\n")); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err := tm.writeAny(w, v, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if fv.Kind() == reflect.Map {
|
||||
// Map fields are rendered as a repeated struct with key/value fields.
|
||||
keys := fv.MapKeys()
|
||||
sort.Sort(mapKeys(keys))
|
||||
for _, key := range keys {
|
||||
val := fv.MapIndex(key)
|
||||
if err := writeName(w, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// open struct
|
||||
if err := w.WriteByte('<'); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.indent()
|
||||
// key
|
||||
if _, err := w.WriteString("key:"); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
// nil values aren't legal, but we can avoid panicking because of them.
|
||||
if val.Kind() != reflect.Ptr || !val.IsNil() {
|
||||
// value
|
||||
if _, err := w.WriteString("value:"); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := tm.writeAny(w, val, props.MapValProp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// close struct
|
||||
w.unindent()
|
||||
if err := w.WriteByte('>'); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
|
||||
// empty bytes field
|
||||
continue
|
||||
}
|
||||
if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
|
||||
// proto3 non-repeated scalar field; skip if zero value
|
||||
if isProto3Zero(fv) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if fv.Kind() == reflect.Interface {
|
||||
// Check if it is a oneof.
|
||||
if st.Field(i).Tag.Get("protobuf_oneof") != "" {
|
||||
// fv is nil, or holds a pointer to generated struct.
|
||||
// That generated struct has exactly one field,
|
||||
// which has a protobuf struct tag.
|
||||
if fv.IsNil() {
|
||||
continue
|
||||
}
|
||||
inner := fv.Elem().Elem() // interface -> *T -> T
|
||||
tag := inner.Type().Field(0).Tag.Get("protobuf")
|
||||
props = new(Properties) // Overwrite the outer props var, but not its pointee.
|
||||
props.Parse(tag)
|
||||
// Write the value in the oneof, not the oneof itself.
|
||||
fv = inner.Field(0)
|
||||
|
||||
// Special case to cope with malformed messages gracefully:
|
||||
// If the value in the oneof is a nil pointer, don't panic
|
||||
// in writeAny.
|
||||
if fv.Kind() == reflect.Ptr && fv.IsNil() {
|
||||
// Use errors.New so writeAny won't render quotes.
|
||||
msg := errors.New("/* nil */")
|
||||
fv = reflect.ValueOf(&msg).Elem()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := writeName(w, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Enums have a String method, so writeAny will work fine.
|
||||
if err := tm.writeAny(w, fv, props); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Extensions (the XXX_extensions field).
|
||||
pv := sv.Addr()
|
||||
if _, err := extendable(pv.Interface()); err == nil {
|
||||
if err := tm.writeExtensions(w, pv); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeAny writes an arbitrary field.
|
||||
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
|
||||
v = reflect.Indirect(v)
|
||||
|
||||
// Floats have special cases.
|
||||
if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
|
||||
x := v.Float()
|
||||
var b []byte
|
||||
switch {
|
||||
case math.IsInf(x, 1):
|
||||
b = posInf
|
||||
case math.IsInf(x, -1):
|
||||
b = negInf
|
||||
case math.IsNaN(x):
|
||||
b = nan
|
||||
}
|
||||
if b != nil {
|
||||
_, err := w.Write(b)
|
||||
return err
|
||||
}
|
||||
// Other values are handled below.
|
||||
}
|
||||
|
||||
// We don't attempt to serialise every possible value type; only those
|
||||
// that can occur in protocol buffers.
|
||||
switch v.Kind() {
|
||||
case reflect.Slice:
|
||||
// Should only be a []byte; repeated fields are handled in writeStruct.
|
||||
if err := writeString(w, string(v.Bytes())); err != nil {
|
||||
return err
|
||||
}
|
||||
case reflect.String:
|
||||
if err := writeString(w, v.String()); err != nil {
|
||||
return err
|
||||
}
|
||||
case reflect.Struct:
|
||||
// Required/optional group/message.
|
||||
var bra, ket byte = '<', '>'
|
||||
if props != nil && props.Wire == "group" {
|
||||
bra, ket = '{', '}'
|
||||
}
|
||||
if err := w.WriteByte(bra); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.indent()
|
||||
if v.CanAddr() {
|
||||
// Calling v.Interface on a struct causes the reflect package to
|
||||
// copy the entire struct. This is racy with the new Marshaler
|
||||
// since we atomically update the XXX_sizecache.
|
||||
//
|
||||
// Thus, we retrieve a pointer to the struct if possible to avoid
|
||||
// a race since v.Interface on the pointer doesn't copy the struct.
|
||||
//
|
||||
// If v is not addressable, then we are not worried about a race
|
||||
// since it implies that the binary Marshaler cannot possibly be
|
||||
// mutating this value.
|
||||
v = v.Addr()
|
||||
}
|
||||
if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
|
||||
text, err := etm.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = w.Write(text); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if v.Kind() == reflect.Ptr {
|
||||
v = v.Elem()
|
||||
}
|
||||
if err := tm.writeStruct(w, v); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.unindent()
|
||||
if err := w.WriteByte(ket); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
_, err := fmt.Fprint(w, v.Interface())
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// equivalent to C's isprint.
func isprint(c byte) bool {
	return c >= 0x20 && c < 0x7f
}
|
||||
|
||||
// writeString writes a string in the protocol buffer text format.
|
||||
// It is similar to strconv.Quote except we don't use Go escape sequences,
|
||||
// we treat the string as a byte sequence, and we use octal escapes.
|
||||
// These differences are to maintain interoperability with the other
|
||||
// languages' implementations of the text format.
|
||||
func writeString(w *textWriter, s string) error {
|
||||
// use WriteByte here to get any needed indent
|
||||
if err := w.WriteByte('"'); err != nil {
|
||||
return err
|
||||
}
|
||||
// Loop over the bytes, not the runes.
|
||||
for i := 0; i < len(s); i++ {
|
||||
var err error
|
||||
// Divergence from C++: we don't escape apostrophes.
|
||||
// There's no need to escape them, and the C++ parser
|
||||
// copes with a naked apostrophe.
|
||||
switch c := s[i]; c {
|
||||
case '\n':
|
||||
_, err = w.w.Write(backslashN)
|
||||
case '\r':
|
||||
_, err = w.w.Write(backslashR)
|
||||
case '\t':
|
||||
_, err = w.w.Write(backslashT)
|
||||
case '"':
|
||||
_, err = w.w.Write(backslashDQ)
|
||||
case '\\':
|
||||
_, err = w.w.Write(backslashBS)
|
||||
default:
|
||||
if isprint(c) {
|
||||
err = w.w.WriteByte(c)
|
||||
} else {
|
||||
_, err = fmt.Fprintf(w.w, "\\%03o", c)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return w.WriteByte('"')
|
||||
}
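// Illustrative sketch, not part of the upstream file: what the escaping above
// produces. Printable ASCII passes through, the control characters named in
// the switch use their symbolic escapes, and every other byte becomes a
// three-digit octal escape, which keeps the output parseable by the other
// language implementations. Assumes the textWriter fields declared earlier in
// this file.
//
//	var buf bytes.Buffer
//	w := &textWriter{w: &buf, complete: true}
//	_ = writeString(w, "a\n\xff")
//	// buf.String() == `"a\n\377"`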
|
||||
|
||||
func writeUnknownStruct(w *textWriter, data []byte) (err error) {
|
||||
if !w.compact {
|
||||
if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
b := NewBuffer(data)
|
||||
for b.index < len(b.buf) {
|
||||
x, err := b.DecodeVarint()
|
||||
if err != nil {
|
||||
_, err := fmt.Fprintf(w, "/* %v */\n", err)
|
||||
return err
|
||||
}
|
||||
wire, tag := x&7, x>>3
|
||||
if wire == WireEndGroup {
|
||||
w.unindent()
|
||||
if _, err := w.Write(endBraceNewline); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if _, err := fmt.Fprint(w, tag); err != nil {
|
||||
return err
|
||||
}
|
||||
if wire != WireStartGroup {
|
||||
if err := w.WriteByte(':'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if !w.compact || wire == WireStartGroup {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
switch wire {
|
||||
case WireBytes:
|
||||
buf, e := b.DecodeRawBytes(false)
|
||||
if e == nil {
|
||||
_, err = fmt.Fprintf(w, "%q", buf)
|
||||
} else {
|
||||
_, err = fmt.Fprintf(w, "/* %v */", e)
|
||||
}
|
||||
case WireFixed32:
|
||||
x, err = b.DecodeFixed32()
|
||||
err = writeUnknownInt(w, x, err)
|
||||
case WireFixed64:
|
||||
x, err = b.DecodeFixed64()
|
||||
err = writeUnknownInt(w, x, err)
|
||||
case WireStartGroup:
|
||||
err = w.WriteByte('{')
|
||||
w.indent()
|
||||
case WireVarint:
|
||||
x, err = b.DecodeVarint()
|
||||
err = writeUnknownInt(w, x, err)
|
||||
default:
|
||||
_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
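// Hypothetical walk-through of the decoding above, not upstream code: each key
// varint packs the field number in the high bits and the wire type in the low
// three bits, so the classic two-byte varint payload 0x08 0x96 0x01 renders as
// field 1 with value 150.
//
//	var buf bytes.Buffer
//	w := &textWriter{w: &buf, complete: true}
//	_ = writeUnknownStruct(w, []byte{0x08, 0x96, 0x01})
//	// buf.String() contains "1: 150"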
|
||||
|
||||
func writeUnknownInt(w *textWriter, x uint64, err error) error {
	if err == nil {
		_, err = fmt.Fprint(w, x)
	} else {
		_, err = fmt.Fprintf(w, "/* %v */", err)
	}
	return err
}

type int32Slice []int32

func (s int32Slice) Len() int           { return len(s) }
func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s int32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
// writeExtensions writes all the extensions in pv.
|
||||
// pv is assumed to be a pointer to a protocol message struct that is extendable.
|
||||
func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
|
||||
emap := extensionMaps[pv.Type().Elem()]
|
||||
ep, _ := extendable(pv.Interface())
|
||||
|
||||
// Order the extensions by ID.
|
||||
// This isn't strictly necessary, but it will give us
|
||||
// canonical output, which will also make testing easier.
|
||||
m, mu := ep.extensionsRead()
|
||||
if m == nil {
|
||||
return nil
|
||||
}
|
||||
mu.Lock()
|
||||
ids := make([]int32, 0, len(m))
|
||||
for id := range m {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
sort.Sort(int32Slice(ids))
|
||||
mu.Unlock()
|
||||
|
||||
for _, extNum := range ids {
|
||||
ext := m[extNum]
|
||||
var desc *ExtensionDesc
|
||||
if emap != nil {
|
||||
desc = emap[extNum]
|
||||
}
|
||||
if desc == nil {
|
||||
// Unknown extension.
|
||||
if err := writeUnknownStruct(w, ext.enc); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
pb, err := GetExtension(ep, desc)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed getting extension: %v", err)
|
||||
}
|
||||
|
||||
// Repeated extensions will appear as a slice.
|
||||
if !desc.repeated() {
|
||||
if err := tm.writeExtension(w, desc.Name, pb); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
v := reflect.ValueOf(pb)
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
|
||||
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *textWriter) writeIndent() {
	if !w.complete {
		return
	}
	remain := w.ind * 2
	for remain > 0 {
		n := remain
		if n > len(spaces) {
			n = len(spaces)
		}
		w.w.Write(spaces[:n])
		remain -= n
	}
	w.complete = false
}
|
||||
|
||||
// TextMarshaler is a configurable text format marshaler.
type TextMarshaler struct {
	Compact   bool // use compact text format (one line).
	ExpandAny bool // expand google.protobuf.Any messages of known types
}
|
||||
|
||||
// Marshal writes a given protocol buffer in text format.
|
||||
// The only errors returned are from w.
|
||||
func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
|
||||
val := reflect.ValueOf(pb)
|
||||
if pb == nil || val.IsNil() {
|
||||
w.Write([]byte("<nil>"))
|
||||
return nil
|
||||
}
|
||||
var bw *bufio.Writer
|
||||
ww, ok := w.(writer)
|
||||
if !ok {
|
||||
bw = bufio.NewWriter(w)
|
||||
ww = bw
|
||||
}
|
||||
aw := &textWriter{
|
||||
w: ww,
|
||||
complete: true,
|
||||
compact: tm.Compact,
|
||||
}
|
||||
|
||||
if etm, ok := pb.(encoding.TextMarshaler); ok {
|
||||
text, err := etm.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = aw.Write(text); err != nil {
|
||||
return err
|
||||
}
|
||||
if bw != nil {
|
||||
return bw.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// Dereference the received pointer so we don't have outer < and >.
|
||||
v := reflect.Indirect(val)
|
||||
if err := tm.writeStruct(aw, v); err != nil {
|
||||
return err
|
||||
}
|
||||
if bw != nil {
|
||||
return bw.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Text is the same as Marshal, but returns the string directly.
func (tm *TextMarshaler) Text(pb Message) string {
	var buf bytes.Buffer
	tm.Marshal(&buf, pb)
	return buf.String()
}

var (
	defaultTextMarshaler = TextMarshaler{}
	compactTextMarshaler = TextMarshaler{Compact: true}
)

// TODO: consider removing some of the Marshal functions below.

// MarshalText writes a given protocol buffer in text format.
// The only errors returned are from w.
func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }

// MarshalTextString is the same as MarshalText, but returns the string directly.
func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }

// CompactText writes a given protocol buffer in compact text format (one line).
func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }

// CompactTextString is the same as CompactText, but returns the string directly.
func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
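// Usage sketch for the helpers above; pb.Person is a hypothetical generated
// message, any proto.Message behaves the same way.
//
//	msg := &pb.Person{Name: proto.String("Ada")}
//	_ = proto.MarshalText(os.Stdout, msg)    // name: "Ada"
//	s := proto.CompactTextString(msg)        // `name:"Ada" ` (single line)
//	_ = proto.UnmarshalText(s, &pb.Person{}) // round-trips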
|
@ -0,0 +1,880 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
// Functions for parsing the Text protocol buffer format.
|
||||
// TODO: message sets.
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Error string emitted when deserializing Any and fields are already set
|
||||
const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
|
||||
|
||||
type ParseError struct {
	Message string
	Line    int // 1-based line number
	Offset  int // 0-based byte offset from start of input
}

func (p *ParseError) Error() string {
	if p.Line == 1 {
		// show offset only for first line
		return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
	}
	return fmt.Sprintf("line %d: %v", p.Line, p.Message)
}
|
||||
|
||||
type token struct {
|
||||
value string
|
||||
err *ParseError
|
||||
line int // line number
|
||||
offset int // byte number from start of input, not start of line
|
||||
unquoted string // the unquoted version of value, if it was a quoted string
|
||||
}
|
||||
|
||||
func (t *token) String() string {
|
||||
if t.err == nil {
|
||||
return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
|
||||
}
|
||||
return fmt.Sprintf("parse error: %v", t.err)
|
||||
}
|
||||
|
||||
type textParser struct {
|
||||
s string // remaining input
|
||||
done bool // whether the parsing is finished (success or error)
|
||||
backed bool // whether back() was called
|
||||
offset, line int
|
||||
cur token
|
||||
}
|
||||
|
||||
func newTextParser(s string) *textParser {
|
||||
p := new(textParser)
|
||||
p.s = s
|
||||
p.line = 1
|
||||
p.cur.line = 1
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
	pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
	p.cur.err = pe
	p.done = true
	return pe
}
|
||||
|
||||
// Numbers and identifiers are matched by [-+._A-Za-z0-9]
|
||||
func isIdentOrNumberChar(c byte) bool {
|
||||
switch {
|
||||
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
|
||||
return true
|
||||
case '0' <= c && c <= '9':
|
||||
return true
|
||||
}
|
||||
switch c {
|
||||
case '-', '+', '.', '_':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isWhitespace(c byte) bool {
	switch c {
	case ' ', '\t', '\n', '\r':
		return true
	}
	return false
}

func isQuote(c byte) bool {
	switch c {
	case '"', '\'':
		return true
	}
	return false
}
|
||||
|
||||
func (p *textParser) skipWhitespace() {
|
||||
i := 0
|
||||
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
|
||||
if p.s[i] == '#' {
|
||||
// comment; skip to end of line or input
|
||||
for i < len(p.s) && p.s[i] != '\n' {
|
||||
i++
|
||||
}
|
||||
if i == len(p.s) {
|
||||
break
|
||||
}
|
||||
}
|
||||
if p.s[i] == '\n' {
|
||||
p.line++
|
||||
}
|
||||
i++
|
||||
}
|
||||
p.offset += i
|
||||
p.s = p.s[i:len(p.s)]
|
||||
if len(p.s) == 0 {
|
||||
p.done = true
|
||||
}
|
||||
}
|
||||
|
||||
func (p *textParser) advance() {
|
||||
// Skip whitespace
|
||||
p.skipWhitespace()
|
||||
if p.done {
|
||||
return
|
||||
}
|
||||
|
||||
// Start of non-whitespace
|
||||
p.cur.err = nil
|
||||
p.cur.offset, p.cur.line = p.offset, p.line
|
||||
p.cur.unquoted = ""
|
||||
switch p.s[0] {
|
||||
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
|
||||
// Single symbol
|
||||
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
|
||||
case '"', '\'':
|
||||
// Quoted string
|
||||
i := 1
|
||||
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
|
||||
if p.s[i] == '\\' && i+1 < len(p.s) {
|
||||
// skip escaped char
|
||||
i++
|
||||
}
|
||||
i++
|
||||
}
|
||||
if i >= len(p.s) || p.s[i] != p.s[0] {
|
||||
p.errorf("unmatched quote")
|
||||
return
|
||||
}
|
||||
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
|
||||
if err != nil {
|
||||
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
|
||||
return
|
||||
}
|
||||
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
|
||||
p.cur.unquoted = unq
|
||||
default:
|
||||
i := 0
|
||||
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
|
||||
i++
|
||||
}
|
||||
if i == 0 {
|
||||
p.errorf("unexpected byte %#x", p.s[0])
|
||||
return
|
||||
}
|
||||
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
|
||||
}
|
||||
p.offset += len(p.cur.value)
|
||||
}
|
||||
|
||||
var (
|
||||
errBadUTF8 = errors.New("proto: bad UTF-8")
|
||||
)
|
||||
|
||||
func unquoteC(s string, quote rune) (string, error) {
|
||||
// This is based on C++'s tokenizer.cc.
|
||||
// Despite its name, this is *not* parsing C syntax.
|
||||
// For instance, "\0" is an invalid quoted string.
|
||||
|
||||
// Avoid allocation in trivial cases.
|
||||
simple := true
|
||||
for _, r := range s {
|
||||
if r == '\\' || r == quote {
|
||||
simple = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if simple {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
buf := make([]byte, 0, 3*len(s)/2)
|
||||
for len(s) > 0 {
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
return "", errBadUTF8
|
||||
}
|
||||
s = s[n:]
|
||||
if r != '\\' {
|
||||
if r < utf8.RuneSelf {
|
||||
buf = append(buf, byte(r))
|
||||
} else {
|
||||
buf = append(buf, string(r)...)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
ch, tail, err := unescape(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
buf = append(buf, ch...)
|
||||
s = tail
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
func unescape(s string) (ch string, tail string, err error) {
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
return "", "", errBadUTF8
|
||||
}
|
||||
s = s[n:]
|
||||
switch r {
|
||||
case 'a':
|
||||
return "\a", s, nil
|
||||
case 'b':
|
||||
return "\b", s, nil
|
||||
case 'f':
|
||||
return "\f", s, nil
|
||||
case 'n':
|
||||
return "\n", s, nil
|
||||
case 'r':
|
||||
return "\r", s, nil
|
||||
case 't':
|
||||
return "\t", s, nil
|
||||
case 'v':
|
||||
return "\v", s, nil
|
||||
case '?':
|
||||
return "?", s, nil // trigraph workaround
|
||||
case '\'', '"', '\\':
|
||||
return string(r), s, nil
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7':
|
||||
if len(s) < 2 {
|
||||
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
|
||||
}
|
||||
ss := string(r) + s[:2]
|
||||
s = s[2:]
|
||||
i, err := strconv.ParseUint(ss, 8, 8)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
|
||||
}
|
||||
return string([]byte{byte(i)}), s, nil
|
||||
case 'x', 'X', 'u', 'U':
|
||||
var n int
|
||||
switch r {
|
||||
case 'x', 'X':
|
||||
n = 2
|
||||
case 'u':
|
||||
n = 4
|
||||
case 'U':
|
||||
n = 8
|
||||
}
|
||||
if len(s) < n {
|
||||
return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
|
||||
}
|
||||
ss := s[:n]
|
||||
s = s[n:]
|
||||
i, err := strconv.ParseUint(ss, 16, 64)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
|
||||
}
|
||||
if r == 'x' || r == 'X' {
|
||||
return string([]byte{byte(i)}), s, nil
|
||||
}
|
||||
if i > utf8.MaxRune {
|
||||
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
|
||||
}
|
||||
return string(i), s, nil
|
||||
}
|
||||
return "", "", fmt.Errorf(`unknown escape \%c`, r)
|
||||
}
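// Hedged example of the escape handling above (not part of the upstream file):
// an octal escape is three octal digits, \x takes two hex digits, and \u/\U
// decode Unicode code points, so all of the following unquote to "ABC".
//
//	s1, _ := unquoteC(`\101\102\103`, '"')  // octal
//	s2, _ := unquoteC(`\x41\x42\x43`, '"')  // hex
//	s3, _ := unquoteC(`\u0041\u0042C`, '"') // unicode escapes plus a literal
//	// s1 == "ABC" && s1 == s2 && s2 == s3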
|
||||
|
||||
// Back off the parser by one token. Can only be done between calls to next().
|
||||
// It makes the next advance() a no-op.
|
||||
func (p *textParser) back() { p.backed = true }
|
||||
|
||||
// Advances the parser and returns the new current token.
|
||||
func (p *textParser) next() *token {
|
||||
if p.backed || p.done {
|
||||
p.backed = false
|
||||
return &p.cur
|
||||
}
|
||||
p.advance()
|
||||
if p.done {
|
||||
p.cur.value = ""
|
||||
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
|
||||
// Look for multiple quoted strings separated by whitespace,
|
||||
// and concatenate them.
|
||||
cat := p.cur
|
||||
for {
|
||||
p.skipWhitespace()
|
||||
if p.done || !isQuote(p.s[0]) {
|
||||
break
|
||||
}
|
||||
p.advance()
|
||||
if p.cur.err != nil {
|
||||
return &p.cur
|
||||
}
|
||||
cat.value += " " + p.cur.value
|
||||
cat.unquoted += p.cur.unquoted
|
||||
}
|
||||
p.done = false // parser may have seen EOF, but we want to return cat
|
||||
p.cur = cat
|
||||
}
|
||||
return &p.cur
|
||||
}
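// Behaviour sketch, not upstream code: as in the C++ tokenizer, adjacent
// quoted strings separated only by whitespace are concatenated into a single
// token, so both inputs below set the same value for a hypothetical string
// field "name".
//
//	_ = proto.UnmarshalText(`name: "foo" "bar"`, msg) // name == "foobar"
//	_ = proto.UnmarshalText(`name: "foobar"`, msg)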
|
||||
|
||||
func (p *textParser) consumeToken(s string) error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != s {
|
||||
p.back()
|
||||
return p.errorf("expected %q, found %q", s, tok.value)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Return a RequiredNotSetError indicating which required field was not set.
|
||||
func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
|
||||
st := sv.Type()
|
||||
sprops := GetProperties(st)
|
||||
for i := 0; i < st.NumField(); i++ {
|
||||
if !isNil(sv.Field(i)) {
|
||||
continue
|
||||
}
|
||||
|
||||
props := sprops.Prop[i]
|
||||
if props.Required {
|
||||
return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
|
||||
}
|
||||
}
|
||||
return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
|
||||
}
|
||||
|
||||
// Returns the index in the struct for the named field, as well as the parsed tag properties.
|
||||
func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
|
||||
i, ok := sprops.decoderOrigNames[name]
|
||||
if ok {
|
||||
return i, sprops.Prop[i], true
|
||||
}
|
||||
return -1, nil, false
|
||||
}
|
||||
|
||||
// Consume a ':' from the input stream (if the next token is a colon),
|
||||
// returning an error if a colon is needed but not present.
|
||||
func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != ":" {
|
||||
// Colon is optional when the field is a group or message.
|
||||
needColon := true
|
||||
switch props.Wire {
|
||||
case "group":
|
||||
needColon = false
|
||||
case "bytes":
|
||||
// A "bytes" field is either a message, a string, or a repeated field;
|
||||
// those three become *T, *string and []T respectively, so we can check for
|
||||
// this field being a pointer to a non-string.
|
||||
if typ.Kind() == reflect.Ptr {
|
||||
// *T or *string
|
||||
if typ.Elem().Kind() == reflect.String {
|
||||
break
|
||||
}
|
||||
} else if typ.Kind() == reflect.Slice {
|
||||
// []T or []*T
|
||||
if typ.Elem().Kind() != reflect.Ptr {
|
||||
break
|
||||
}
|
||||
} else if typ.Kind() == reflect.String {
|
||||
// The proto3 exception is for a string field,
|
||||
// which requires a colon.
|
||||
break
|
||||
}
|
||||
needColon = false
|
||||
}
|
||||
if needColon {
|
||||
return p.errorf("expected ':', found %q", tok.value)
|
||||
}
|
||||
p.back()
|
||||
}
|
||||
return nil
|
||||
}
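// Illustrative note with hypothetical field names, not upstream code: the
// colon is required for scalar fields but optional before a message or group
// value, so checkForColon accepts both spellings below.
//
//	_ = proto.UnmarshalText(`inner: < id: 1 >`, msg) // colon present
//	_ = proto.UnmarshalText(`inner < id: 1 >`, msg)  // colon omitted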
|
||||
|
||||
func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
||||
st := sv.Type()
|
||||
sprops := GetProperties(st)
|
||||
reqCount := sprops.reqCount
|
||||
var reqFieldErr error
|
||||
fieldSet := make(map[string]bool)
|
||||
// A struct is a sequence of "name: value", terminated by one of
|
||||
// '>' or '}', or the end of the input. A name may also be
|
||||
// "[extension]" or "[type/url]".
|
||||
//
|
||||
// The whole struct can also be an expanded Any message, like:
|
||||
// [type/url] < ... struct contents ... >
|
||||
for {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == terminator {
|
||||
break
|
||||
}
|
||||
if tok.value == "[" {
|
||||
// Looks like an extension or an Any.
|
||||
//
|
||||
// TODO: Check whether we need to handle
|
||||
// namespace rooted names (e.g. ".something.Foo").
|
||||
extName, err := p.consumeExtName()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if s := strings.LastIndex(extName, "/"); s >= 0 {
|
||||
// If it contains a slash, it's an Any type URL.
|
||||
messageName := extName[s+1:]
|
||||
mt := MessageType(messageName)
|
||||
if mt == nil {
|
||||
return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
|
||||
}
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
// consume an optional colon
|
||||
if tok.value == ":" {
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
}
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "<":
|
||||
terminator = ">"
|
||||
case "{":
|
||||
terminator = "}"
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
v := reflect.New(mt.Elem())
|
||||
if pe := p.readStruct(v.Elem(), terminator); pe != nil {
|
||||
return pe
|
||||
}
|
||||
b, err := Marshal(v.Interface().(Message))
|
||||
if err != nil {
|
||||
return p.errorf("failed to marshal message of type %q: %v", messageName, err)
|
||||
}
|
||||
if fieldSet["type_url"] {
|
||||
return p.errorf(anyRepeatedlyUnpacked, "type_url")
|
||||
}
|
||||
if fieldSet["value"] {
|
||||
return p.errorf(anyRepeatedlyUnpacked, "value")
|
||||
}
|
||||
sv.FieldByName("TypeUrl").SetString(extName)
|
||||
sv.FieldByName("Value").SetBytes(b)
|
||||
fieldSet["type_url"] = true
|
||||
fieldSet["value"] = true
|
||||
continue
|
||||
}
|
||||
|
||||
var desc *ExtensionDesc
|
||||
// This could be faster, but it's functional.
|
||||
// TODO: Do something smarter than a linear scan.
|
||||
for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
|
||||
if d.Name == extName {
|
||||
desc = d
|
||||
break
|
||||
}
|
||||
}
|
||||
if desc == nil {
|
||||
return p.errorf("unrecognized extension %q", extName)
|
||||
}
|
||||
|
||||
props := &Properties{}
|
||||
props.Parse(desc.Tag)
|
||||
|
||||
typ := reflect.TypeOf(desc.ExtensionType)
|
||||
if err := p.checkForColon(props, typ); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rep := desc.repeated()
|
||||
|
||||
// Read the extension structure, and set it in
|
||||
// the value we're constructing.
|
||||
var ext reflect.Value
|
||||
if !rep {
|
||||
ext = reflect.New(typ).Elem()
|
||||
} else {
|
||||
ext = reflect.New(typ.Elem()).Elem()
|
||||
}
|
||||
if err := p.readAny(ext, props); err != nil {
|
||||
if _, ok := err.(*RequiredNotSetError); !ok {
|
||||
return err
|
||||
}
|
||||
reqFieldErr = err
|
||||
}
|
||||
ep := sv.Addr().Interface().(Message)
|
||||
if !rep {
|
||||
SetExtension(ep, desc, ext.Interface())
|
||||
} else {
|
||||
old, err := GetExtension(ep, desc)
|
||||
var sl reflect.Value
|
||||
if err == nil {
|
||||
sl = reflect.ValueOf(old) // existing slice
|
||||
} else {
|
||||
sl = reflect.MakeSlice(typ, 0, 1)
|
||||
}
|
||||
sl = reflect.Append(sl, ext)
|
||||
SetExtension(ep, desc, sl.Interface())
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// This is a normal, non-extension field.
|
||||
name := tok.value
|
||||
var dst reflect.Value
|
||||
fi, props, ok := structFieldByName(sprops, name)
|
||||
if ok {
|
||||
dst = sv.Field(fi)
|
||||
} else if oop, ok := sprops.OneofTypes[name]; ok {
|
||||
// It is a oneof.
|
||||
props = oop.Prop
|
||||
nv := reflect.New(oop.Type.Elem())
|
||||
dst = nv.Elem().Field(0)
|
||||
field := sv.Field(oop.Field)
|
||||
if !field.IsNil() {
|
||||
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
|
||||
}
|
||||
field.Set(nv)
|
||||
}
|
||||
if !dst.IsValid() {
|
||||
return p.errorf("unknown field name %q in %v", name, st)
|
||||
}
|
||||
|
||||
if dst.Kind() == reflect.Map {
|
||||
// Consume any colon.
|
||||
if err := p.checkForColon(props, dst.Type()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Construct the map if it doesn't already exist.
|
||||
if dst.IsNil() {
|
||||
dst.Set(reflect.MakeMap(dst.Type()))
|
||||
}
|
||||
key := reflect.New(dst.Type().Key()).Elem()
|
||||
val := reflect.New(dst.Type().Elem()).Elem()
|
||||
|
||||
// The map entry should be this sequence of tokens:
|
||||
// < key : KEY value : VALUE >
|
||||
// However, implementations may omit key or value, and technically
|
||||
// we should support them in any order. See b/28924776 for a time
|
||||
// this went wrong.
|
||||
|
||||
tok := p.next()
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "<":
|
||||
terminator = ">"
|
||||
case "{":
|
||||
terminator = "}"
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
for {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == terminator {
|
||||
break
|
||||
}
|
||||
switch tok.value {
|
||||
case "key":
|
||||
if err := p.consumeToken(":"); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.readAny(key, props.MapKeyProp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
case "value":
|
||||
if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.readAny(val, props.MapValProp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
p.back()
|
||||
return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
|
||||
}
|
||||
}
|
||||
|
||||
dst.SetMapIndex(key, val)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check that it's not already set if it's not a repeated field.
|
||||
if !props.Repeated && fieldSet[name] {
|
||||
return p.errorf("non-repeated field %q was repeated", name)
|
||||
}
|
||||
|
||||
if err := p.checkForColon(props, dst.Type()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Parse into the field.
|
||||
fieldSet[name] = true
|
||||
if err := p.readAny(dst, props); err != nil {
|
||||
if _, ok := err.(*RequiredNotSetError); !ok {
|
||||
return err
|
||||
}
|
||||
reqFieldErr = err
|
||||
}
|
||||
if props.Required {
|
||||
reqCount--
|
||||
}
|
||||
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if reqCount > 0 {
|
||||
return p.missingRequiredFieldError(sv)
|
||||
}
|
||||
return reqFieldErr
|
||||
}
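// Hedged usage sketch (field and extension names are hypothetical): a struct
// body is a sequence of "name: value" entries, with extension names written in
// brackets, and map entries written as nested key/value messages. An expanded
// google.protobuf.Any uses the [type/url] < ... > form handled above.
//
//	err := proto.UnmarshalText(`
//	    name: "Ada"
//	    [my.pkg.ext_field]: 7
//	    attrs < key: "lang" value: "go" >
//	`, msg)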
|
||||
|
||||
// consumeExtName consumes extension name or expanded Any type URL and the
|
||||
// following ']'. It returns the name or URL consumed.
|
||||
func (p *textParser) consumeExtName() (string, error) {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return "", tok.err
|
||||
}
|
||||
|
||||
// If extension name or type url is quoted, it's a single token.
|
||||
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
|
||||
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return name, p.consumeToken("]")
|
||||
}
|
||||
|
||||
// Consume everything up to "]"
|
||||
var parts []string
|
||||
for tok.value != "]" {
|
||||
parts = append(parts, tok.value)
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
|
||||
}
|
||||
if p.done && tok.value != "]" {
|
||||
return "", p.errorf("unclosed type_url or extension name")
|
||||
}
|
||||
}
|
||||
return strings.Join(parts, ""), nil
|
||||
}
|
||||
|
||||
// consumeOptionalSeparator consumes an optional semicolon or comma.
|
||||
// It is used in readStruct to provide backward compatibility.
|
||||
func (p *textParser) consumeOptionalSeparator() error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != ";" && tok.value != "," {
|
||||
p.back()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *textParser) readAny(v reflect.Value, props *Properties) error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == "" {
|
||||
return p.errorf("unexpected EOF")
|
||||
}
|
||||
|
||||
switch fv := v; fv.Kind() {
|
||||
case reflect.Slice:
|
||||
at := v.Type()
|
||||
if at.Elem().Kind() == reflect.Uint8 {
|
||||
// Special case for []byte
|
||||
if tok.value[0] != '"' && tok.value[0] != '\'' {
|
||||
// Deliberately written out here, as the error after
|
||||
// this switch statement would write "invalid []byte: ...",
|
||||
// which is not as user-friendly.
|
||||
return p.errorf("invalid string: %v", tok.value)
|
||||
}
|
||||
bytes := []byte(tok.unquoted)
|
||||
fv.Set(reflect.ValueOf(bytes))
|
||||
return nil
|
||||
}
|
||||
// Repeated field.
|
||||
if tok.value == "[" {
|
||||
// Repeated field with list notation, like [1,2,3].
|
||||
for {
|
||||
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
||||
err := p.readAny(fv.Index(fv.Len()-1), props)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == "]" {
|
||||
break
|
||||
}
|
||||
if tok.value != "," {
|
||||
return p.errorf("Expected ']' or ',' found %q", tok.value)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// One value of the repeated field.
|
||||
p.back()
|
||||
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
||||
return p.readAny(fv.Index(fv.Len()-1), props)
|
||||
case reflect.Bool:
|
||||
// true/1/t/True or false/f/0/False.
|
||||
switch tok.value {
|
||||
case "true", "1", "t", "True":
|
||||
fv.SetBool(true)
|
||||
return nil
|
||||
case "false", "0", "f", "False":
|
||||
fv.SetBool(false)
|
||||
return nil
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
v := tok.value
|
||||
// Ignore 'f' for compatibility with output generated by C++, but don't
|
||||
// remove 'f' when the value is "-inf" or "inf".
|
||||
if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
|
||||
v = v[:len(v)-1]
|
||||
}
|
||||
if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
|
||||
fv.SetFloat(f)
|
||||
return nil
|
||||
}
|
||||
case reflect.Int32:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
||||
fv.SetInt(x)
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(props.Enum) == 0 {
|
||||
break
|
||||
}
|
||||
m, ok := enumValueMaps[props.Enum]
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
x, ok := m[tok.value]
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
fv.SetInt(int64(x))
|
||||
return nil
|
||||
case reflect.Int64:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
|
||||
fv.SetInt(x)
|
||||
return nil
|
||||
}
|
||||
|
||||
case reflect.Ptr:
|
||||
// A basic field (indirected through pointer), or a repeated message/group
|
||||
p.back()
|
||||
fv.Set(reflect.New(fv.Type().Elem()))
|
||||
return p.readAny(fv.Elem(), props)
|
||||
case reflect.String:
|
||||
if tok.value[0] == '"' || tok.value[0] == '\'' {
|
||||
fv.SetString(tok.unquoted)
|
||||
return nil
|
||||
}
|
||||
case reflect.Struct:
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "{":
|
||||
terminator = "}"
|
||||
case "<":
|
||||
terminator = ">"
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
|
||||
return p.readStruct(fv, terminator)
|
||||
case reflect.Uint32:
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
||||
fv.SetUint(uint64(x))
|
||||
return nil
|
||||
}
|
||||
case reflect.Uint64:
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
||||
fv.SetUint(x)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return p.errorf("invalid %v: %v", v.Type(), tok.value)
|
||||
}
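// Accepted scalar spellings, per the cases above (sketch with hypothetical
// fields, not upstream code):
//
//	_ = proto.UnmarshalText(`enabled: True`, msg)  // bools: true/1/t/True or false/0/f/False
//	_ = proto.UnmarshalText(`ratio: 2.5f`, msg)    // a trailing 'f' from C++ output is dropped
//	_ = proto.UnmarshalText(`kind: KIND_FOO`, msg) // enums accept names or numbers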
|
||||
|
||||
// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
|
||||
// before starting to unmarshal, so any existing data in pb is always removed.
|
||||
// If a required field is not set and no other error occurs,
|
||||
// UnmarshalText returns *RequiredNotSetError.
|
||||
func UnmarshalText(s string, pb Message) error {
|
||||
if um, ok := pb.(encoding.TextUnmarshaler); ok {
|
||||
return um.UnmarshalText([]byte(s))
|
||||
}
|
||||
pb.Reset()
|
||||
v := reflect.ValueOf(pb)
|
||||
return newTextParser(s).readStruct(v.Elem(), "")
|
||||
}
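// Minimal sketch of the contract above (pb.Item and its fields are
// hypothetical): existing data in the message is discarded first, and a
// missing required field is reported as *RequiredNotSetError while the fields
// that were present are still populated.
//
//	msg := &pb.Item{Id: proto.Int32(9)} // reset before parsing
//	err := proto.UnmarshalText(`name: "x"`, msg)
//	if _, ok := err.(*proto.RequiredNotSetError); ok {
//	    // msg.Name is set; required "id" was absent from the text
//	}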
|
@ -0,0 +1,141 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package ptypes
|
||||
|
||||
// This file implements functions to marshal proto.Message to/from
|
||||
// google.protobuf.Any message.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes/any"
|
||||
)
|
||||
|
||||
const googleApis = "type.googleapis.com/"

// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
//
// Note that regular type assertions should be done using the Is
// function. AnyMessageName is provided for less common use cases like filtering a
// sequence of Any messages based on a set of allowed message type names.
func AnyMessageName(any *any.Any) (string, error) {
	if any == nil {
		return "", fmt.Errorf("message is nil")
	}
	slash := strings.LastIndex(any.TypeUrl, "/")
	if slash < 0 {
		return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
	}
	return any.TypeUrl[slash+1:], nil
}
|
||||
|
||||
// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
func MarshalAny(pb proto.Message) (*any.Any, error) {
	value, err := proto.Marshal(pb)
	if err != nil {
		return nil, err
	}
	return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
}
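// Usage sketch (my.pkg.Detail is a hypothetical message type): the resulting
// Any carries the fully qualified message name under the type.googleapis.com/
// prefix together with the binary encoding of the message.
//
//	a, err := ptypes.MarshalAny(&pb.Detail{Code: 3})
//	// a.TypeUrl == "type.googleapis.com/my.pkg.Detail"
//	// a.Value == the proto.Marshal encoding of the Detail message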
|
||||
|
||||
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
|
||||
// allocate a proto.Message for the type specified in a google.protobuf.Any
|
||||
// message. The allocated message is stored in the embedded proto.Message.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var x ptypes.DynamicAny
|
||||
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
|
||||
// fmt.Printf("unmarshaled message: %v", x.Message)
|
||||
type DynamicAny struct {
|
||||
proto.Message
|
||||
}
|
||||
|
||||
// Empty returns a new proto.Message of the type specified in a
// google.protobuf.Any message. It returns an error if the corresponding
// message type isn't linked into the binary.
func Empty(any *any.Any) (proto.Message, error) {
	aname, err := AnyMessageName(any)
	if err != nil {
		return nil, err
	}

	t := proto.MessageType(aname)
	if t == nil {
		return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
	}
	return reflect.New(t.Elem()).Interface().(proto.Message), nil
}
|
||||
|
||||
// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
// message and places the decoded result in pb. It returns an error if the type of
// the Any message's contents does not match the type of pb.
//
// pb can be a proto.Message, or a *DynamicAny.
func UnmarshalAny(any *any.Any, pb proto.Message) error {
	if d, ok := pb.(*DynamicAny); ok {
		if d.Message == nil {
			var err error
			d.Message, err = Empty(any)
			if err != nil {
				return err
			}
		}
		return UnmarshalAny(any, d.Message)
	}

	aname, err := AnyMessageName(any)
	if err != nil {
		return err
	}

	mname := proto.MessageName(pb)
	if aname != mname {
		return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
	}
	return proto.Unmarshal(any.Value, pb)
}
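// Round-trip sketch (pb.Detail is hypothetical): unmarshal into a concrete
// message when the type is known in advance, or into a DynamicAny to let the
// registry pick the concrete type from the type URL.
//
//	var d pb.Detail
//	if err := ptypes.UnmarshalAny(a, &d); err != nil {
//	    // type mismatch or decode error
//	}
//
//	var dyn ptypes.DynamicAny
//	if err := ptypes.UnmarshalAny(a, &dyn); err == nil {
//	    fmt.Println(dyn.Message) // a *pb.Detail allocated via Empty
//	}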
|
||||
|
||||
// Is reports whether the given Any value contains a message of the same type as pb.
func Is(any *any.Any, pb proto.Message) bool {
	// The following is equivalent to AnyMessageName(any) == proto.MessageName(pb),
	// but it avoids scanning TypeUrl for the slash.
	if any == nil {
		return false
	}
	name := proto.MessageName(pb)
	prefix := len(any.TypeUrl) - len(name)
	return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name
}
|
@ -0,0 +1,200 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/protobuf/any.proto
|
||||
|
||||
package any
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// `Any` contains an arbitrary serialized protocol buffer message along with a
|
||||
// URL that describes the type of the serialized message.
|
||||
//
|
||||
// Protobuf library provides support to pack/unpack Any values in the form
|
||||
// of utility functions or additional generated methods of the Any type.
|
||||
//
|
||||
// Example 1: Pack and unpack a message in C++.
|
||||
//
|
||||
// Foo foo = ...;
|
||||
// Any any;
|
||||
// any.PackFrom(foo);
|
||||
// ...
|
||||
// if (any.UnpackTo(&foo)) {
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// Example 2: Pack and unpack a message in Java.
|
||||
//
|
||||
// Foo foo = ...;
|
||||
// Any any = Any.pack(foo);
|
||||
// ...
|
||||
// if (any.is(Foo.class)) {
|
||||
// foo = any.unpack(Foo.class);
|
||||
// }
|
||||
//
|
||||
// Example 3: Pack and unpack a message in Python.
|
||||
//
|
||||
// foo = Foo(...)
|
||||
// any = Any()
|
||||
// any.Pack(foo)
|
||||
// ...
|
||||
// if any.Is(Foo.DESCRIPTOR):
|
||||
// any.Unpack(foo)
|
||||
// ...
|
||||
//
|
||||
// Example 4: Pack and unpack a message in Go
|
||||
//
|
||||
// foo := &pb.Foo{...}
|
||||
// any, err := ptypes.MarshalAny(foo)
|
||||
// ...
|
||||
// foo := &pb.Foo{}
|
||||
// if err := ptypes.UnmarshalAny(any, foo); err != nil {
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// The pack methods provided by protobuf library will by default use
|
||||
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
|
||||
// methods only use the fully qualified type name after the last '/'
|
||||
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
|
||||
// name "y.z".
|
||||
//
|
||||
//
|
||||
// JSON
|
||||
// ====
|
||||
// The JSON representation of an `Any` value uses the regular
|
||||
// representation of the deserialized, embedded message, with an
|
||||
// additional field `@type` which contains the type URL. Example:
|
||||
//
|
||||
// package google.profile;
|
||||
// message Person {
|
||||
// string first_name = 1;
|
||||
// string last_name = 2;
|
||||
// }
|
||||
//
|
||||
// {
|
||||
// "@type": "type.googleapis.com/google.profile.Person",
|
||||
// "firstName": <string>,
|
||||
// "lastName": <string>
|
||||
// }
|
||||
//
|
||||
// If the embedded message type is well-known and has a custom JSON
|
||||
// representation, that representation will be embedded adding a field
|
||||
// `value` which holds the custom JSON in addition to the `@type`
|
||||
// field. Example (for message [google.protobuf.Duration][]):
|
||||
//
|
||||
// {
|
||||
// "@type": "type.googleapis.com/google.protobuf.Duration",
|
||||
// "value": "1.212s"
|
||||
// }
|
||||
//
|
||||
type Any struct {
|
||||
// A URL/resource name that uniquely identifies the type of the serialized
|
||||
// protocol buffer message. The last segment of the URL's path must represent
|
||||
// the fully qualified name of the type (as in
|
||||
// `path/google.protobuf.Duration`). The name should be in a canonical form
|
||||
// (e.g., leading "." is not accepted).
|
||||
//
|
||||
// In practice, teams usually precompile into the binary all types that they
|
||||
// expect it to use in the context of Any. However, for URLs which use the
|
||||
// scheme `http`, `https`, or no scheme, one can optionally set up a type
|
||||
// server that maps type URLs to message definitions as follows:
|
||||
//
|
||||
// * If no scheme is provided, `https` is assumed.
|
||||
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
|
||||
// value in binary format, or produce an error.
|
||||
// * Applications are allowed to cache lookup results based on the
|
||||
// URL, or have them precompiled into a binary to avoid any
|
||||
// lookup. Therefore, binary compatibility needs to be preserved
|
||||
// on changes to types. (Use versioned type names to manage
|
||||
// breaking changes.)
|
||||
//
|
||||
// Note: this functionality is not currently available in the official
|
||||
// protobuf release, and it is not used for type URLs beginning with
|
||||
// type.googleapis.com.
|
||||
//
|
||||
// Schemes other than `http`, `https` (or the empty scheme) might be
|
||||
// used with implementation specific semantics.
|
||||
//
|
||||
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
|
||||
// Must be a valid serialized protocol buffer of the above specified type.
|
||||
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Any) Reset() { *m = Any{} }
|
||||
func (m *Any) String() string { return proto.CompactTextString(m) }
|
||||
func (*Any) ProtoMessage() {}
|
||||
func (*Any) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b53526c13ae22eb4, []int{0}
|
||||
}
|
||||
|
||||
func (*Any) XXX_WellKnownType() string { return "Any" }
|
||||
|
||||
func (m *Any) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Any.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Any.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Any) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Any.Merge(m, src)
|
||||
}
|
||||
func (m *Any) XXX_Size() int {
|
||||
return xxx_messageInfo_Any.Size(m)
|
||||
}
|
||||
func (m *Any) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Any.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Any proto.InternalMessageInfo
|
||||
|
||||
func (m *Any) GetTypeUrl() string {
|
||||
if m != nil {
|
||||
return m.TypeUrl
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Any) GetValue() []byte {
|
||||
if m != nil {
|
||||
return m.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) }
|
||||
|
||||
var fileDescriptor_b53526c13ae22eb4 = []byte{
|
||||
// 185 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
|
||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
|
||||
0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
|
||||
0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
|
||||
0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
|
||||
0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce,
|
||||
0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52,
|
||||
0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc,
|
||||
0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c,
|
||||
0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce,
|
||||
0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff,
|
||||
0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00,
|
||||
}
|
@ -0,0 +1,154 @@
|
||||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option go_package = "github.com/golang/protobuf/ptypes/any";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "AnyProto";
|
||||
option java_multiple_files = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
|
||||
// `Any` contains an arbitrary serialized protocol buffer message along with a
|
||||
// URL that describes the type of the serialized message.
|
||||
//
|
||||
// Protobuf library provides support to pack/unpack Any values in the form
|
||||
// of utility functions or additional generated methods of the Any type.
|
||||
//
|
||||
// Example 1: Pack and unpack a message in C++.
|
||||
//
|
||||
// Foo foo = ...;
|
||||
// Any any;
|
||||
// any.PackFrom(foo);
|
||||
// ...
|
||||
// if (any.UnpackTo(&foo)) {
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// Example 2: Pack and unpack a message in Java.
|
||||
//
|
||||
// Foo foo = ...;
|
||||
// Any any = Any.pack(foo);
|
||||
// ...
|
||||
// if (any.is(Foo.class)) {
|
||||
// foo = any.unpack(Foo.class);
|
||||
// }
|
||||
//
|
||||
// Example 3: Pack and unpack a message in Python.
|
||||
//
|
||||
// foo = Foo(...)
|
||||
// any = Any()
|
||||
// any.Pack(foo)
|
||||
// ...
|
||||
// if any.Is(Foo.DESCRIPTOR):
|
||||
// any.Unpack(foo)
|
||||
// ...
|
||||
//
|
||||
// Example 4: Pack and unpack a message in Go
|
||||
//
|
||||
// foo := &pb.Foo{...}
|
||||
// any, err := ptypes.MarshalAny(foo)
|
||||
// ...
|
||||
// foo := &pb.Foo{}
|
||||
// if err := ptypes.UnmarshalAny(any, foo); err != nil {
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// The pack methods provided by protobuf library will by default use
|
||||
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
|
||||
// methods only use the fully qualified type name after the last '/'
|
||||
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
|
||||
// name "y.z".
|
||||
//
|
||||
//
|
||||
// JSON
|
||||
// ====
|
||||
// The JSON representation of an `Any` value uses the regular
|
||||
// representation of the deserialized, embedded message, with an
|
||||
// additional field `@type` which contains the type URL. Example:
|
||||
//
|
||||
// package google.profile;
|
||||
// message Person {
|
||||
// string first_name = 1;
|
||||
// string last_name = 2;
|
||||
// }
|
||||
//
|
||||
// {
|
||||
// "@type": "type.googleapis.com/google.profile.Person",
|
||||
// "firstName": <string>,
|
||||
// "lastName": <string>
|
||||
// }
|
||||
//
|
||||
// If the embedded message type is well-known and has a custom JSON
|
||||
// representation, that representation will be embedded adding a field
|
||||
// `value` which holds the custom JSON in addition to the `@type`
|
||||
// field. Example (for message [google.protobuf.Duration][]):
|
||||
//
|
||||
// {
|
||||
// "@type": "type.googleapis.com/google.protobuf.Duration",
|
||||
// "value": "1.212s"
|
||||
// }
|
||||
//
|
||||
message Any {
|
||||
// A URL/resource name that uniquely identifies the type of the serialized
|
||||
// protocol buffer message. The last segment of the URL's path must represent
|
||||
// the fully qualified name of the type (as in
|
||||
// `path/google.protobuf.Duration`). The name should be in a canonical form
|
||||
// (e.g., leading "." is not accepted).
|
||||
//
|
||||
// In practice, teams usually precompile into the binary all types that they
|
||||
// expect it to use in the context of Any. However, for URLs which use the
|
||||
// scheme `http`, `https`, or no scheme, one can optionally set up a type
|
||||
// server that maps type URLs to message definitions as follows:
|
||||
//
|
||||
// * If no scheme is provided, `https` is assumed.
|
||||
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
|
||||
// value in binary format, or produce an error.
|
||||
// * Applications are allowed to cache lookup results based on the
|
||||
// URL, or have them precompiled into a binary to avoid any
|
||||
// lookup. Therefore, binary compatibility needs to be preserved
|
||||
// on changes to types. (Use versioned type names to manage
|
||||
// breaking changes.)
|
||||
//
|
||||
// Note: this functionality is not currently available in the official
|
||||
// protobuf release, and it is not used for type URLs beginning with
|
||||
// type.googleapis.com.
|
||||
//
|
||||
// Schemes other than `http`, `https` (or the empty scheme) might be
|
||||
// used with implementation specific semantics.
|
||||
//
|
||||
string type_url = 1;
|
||||
|
||||
// Must be a valid serialized protocol buffer of the above specified type.
|
||||
bytes value = 2;
|
||||
}
|
@ -0,0 +1,35 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
/*
|
||||
Package ptypes contains code for interacting with well-known types.
|
||||
*/
|
||||
package ptypes
|
@ -0,0 +1,102 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package ptypes
|
||||
|
||||
// This file implements conversions between google.protobuf.Duration
|
||||
// and time.Duration.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
durpb "github.com/golang/protobuf/ptypes/duration"
|
||||
)
|
||||
|
||||
const (
|
||||
// Range of a durpb.Duration in seconds, as specified in
|
||||
// google/protobuf/duration.proto. This is about 10,000 years in seconds.
|
||||
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
|
||||
minSeconds = -maxSeconds
|
||||
)
|
||||
|
||||
// validateDuration determines whether the durpb.Duration is valid according to the
|
||||
// definition in google/protobuf/duration.proto. A valid durpb.Duration
|
||||
// may still be too large to fit into a time.Duration (the range of durpb.Duration
|
||||
// is about 10,000 years, and the range of time.Duration is about 290 years).
|
||||
func validateDuration(d *durpb.Duration) error {
|
||||
if d == nil {
|
||||
return errors.New("duration: nil Duration")
|
||||
}
|
||||
if d.Seconds < minSeconds || d.Seconds > maxSeconds {
|
||||
return fmt.Errorf("duration: %v: seconds out of range", d)
|
||||
}
|
||||
if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
|
||||
return fmt.Errorf("duration: %v: nanos out of range", d)
|
||||
}
|
||||
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
|
||||
if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
|
||||
return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Duration converts a durpb.Duration to a time.Duration. Duration
|
||||
// returns an error if the durpb.Duration is invalid or is too large to be
|
||||
// represented in a time.Duration.
|
||||
func Duration(p *durpb.Duration) (time.Duration, error) {
|
||||
if err := validateDuration(p); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
d := time.Duration(p.Seconds) * time.Second
|
||||
if int64(d/time.Second) != p.Seconds {
|
||||
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
|
||||
}
|
||||
if p.Nanos != 0 {
|
||||
d += time.Duration(p.Nanos) * time.Nanosecond
|
||||
if (d < 0) != (p.Nanos < 0) {
|
||||
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
|
||||
}
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// DurationProto converts a time.Duration to a durpb.Duration.
|
||||
func DurationProto(d time.Duration) *durpb.Duration {
|
||||
nanos := d.Nanoseconds()
|
||||
secs := nanos / 1e9
|
||||
nanos -= secs * 1e9
|
||||
return &durpb.Duration{
|
||||
Seconds: secs,
|
||||
Nanos: int32(nanos),
|
||||
}
|
||||
}
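As a usage sketch, the two helpers above round-trip between time.Duration and the generated durpb.Duration; the durpb import path is the one vendored here:

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// time.Duration -> durpb.Duration (never fails; the value is split into seconds+nanos).
	p := ptypes.DurationProto(90*time.Second + 500*time.Millisecond)
	fmt.Println(p.Seconds, p.Nanos) // 90 500000000

	// durpb.Duration -> time.Duration (validated; may fail for out-of-range values).
	d, err := ptypes.Duration(&durpb.Duration{Seconds: 2, Nanos: 250000000})
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // 2.25s
}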
|
@ -0,0 +1,161 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/protobuf/duration.proto
|
||||
|
||||
package duration
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// A Duration represents a signed, fixed-length span of time represented
|
||||
// as a count of seconds and fractions of seconds at nanosecond
|
||||
// resolution. It is independent of any calendar and concepts like "day"
|
||||
// or "month". It is related to Timestamp in that the difference between
|
||||
// two Timestamp values is a Duration and it can be added or subtracted
|
||||
// from a Timestamp. Range is approximately +-10,000 years.
|
||||
//
|
||||
// # Examples
|
||||
//
|
||||
// Example 1: Compute Duration from two Timestamps in pseudo code.
|
||||
//
|
||||
// Timestamp start = ...;
|
||||
// Timestamp end = ...;
|
||||
// Duration duration = ...;
|
||||
//
|
||||
// duration.seconds = end.seconds - start.seconds;
|
||||
// duration.nanos = end.nanos - start.nanos;
|
||||
//
|
||||
// if (duration.seconds < 0 && duration.nanos > 0) {
|
||||
// duration.seconds += 1;
|
||||
// duration.nanos -= 1000000000;
|
||||
// } else if (duration.seconds > 0 && duration.nanos < 0) {
|
||||
// duration.seconds -= 1;
|
||||
// duration.nanos += 1000000000;
|
||||
// }
|
||||
//
|
||||
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
|
||||
//
|
||||
// Timestamp start = ...;
|
||||
// Duration duration = ...;
|
||||
// Timestamp end = ...;
|
||||
//
|
||||
// end.seconds = start.seconds + duration.seconds;
|
||||
// end.nanos = start.nanos + duration.nanos;
|
||||
//
|
||||
// if (end.nanos < 0) {
|
||||
// end.seconds -= 1;
|
||||
// end.nanos += 1000000000;
|
||||
// } else if (end.nanos >= 1000000000) {
|
||||
// end.seconds += 1;
|
||||
// end.nanos -= 1000000000;
|
||||
// }
|
||||
//
|
||||
// Example 3: Compute Duration from datetime.timedelta in Python.
|
||||
//
|
||||
// td = datetime.timedelta(days=3, minutes=10)
|
||||
// duration = Duration()
|
||||
// duration.FromTimedelta(td)
|
||||
//
|
||||
// # JSON Mapping
|
||||
//
|
||||
// In JSON format, the Duration type is encoded as a string rather than an
|
||||
// object, where the string ends in the suffix "s" (indicating seconds) and
|
||||
// is preceded by the number of seconds, with nanoseconds expressed as
|
||||
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
|
||||
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
|
||||
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
|
||||
// microsecond should be expressed in JSON format as "3.000001s".
|
||||
//
|
||||
//
|
||||
type Duration struct {
|
||||
// Signed seconds of the span of time. Must be from -315,576,000,000
|
||||
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
|
||||
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
|
||||
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
|
||||
// Signed fractions of a second at nanosecond resolution of the span
|
||||
// of time. Durations less than one second are represented with a 0
|
||||
// `seconds` field and a positive or negative `nanos` field. For durations
|
||||
// of one second or more, a non-zero value for the `nanos` field must be
|
||||
// of the same sign as the `seconds` field. Must be from -999,999,999
|
||||
// to +999,999,999 inclusive.
|
||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Duration) Reset() { *m = Duration{} }
|
||||
func (m *Duration) String() string { return proto.CompactTextString(m) }
|
||||
func (*Duration) ProtoMessage() {}
|
||||
func (*Duration) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_23597b2ebd7ac6c5, []int{0}
|
||||
}
|
||||
|
||||
func (*Duration) XXX_WellKnownType() string { return "Duration" }
|
||||
|
||||
func (m *Duration) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Duration.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Duration) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Duration.Merge(m, src)
|
||||
}
|
||||
func (m *Duration) XXX_Size() int {
|
||||
return xxx_messageInfo_Duration.Size(m)
|
||||
}
|
||||
func (m *Duration) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Duration.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Duration proto.InternalMessageInfo
|
||||
|
||||
func (m *Duration) GetSeconds() int64 {
|
||||
if m != nil {
|
||||
return m.Seconds
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Duration) GetNanos() int32 {
|
||||
if m != nil {
|
||||
return m.Nanos
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) }
|
||||
|
||||
var fileDescriptor_23597b2ebd7ac6c5 = []byte{
|
||||
// 190 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
|
||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
|
||||
0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
|
||||
0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
|
||||
0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
|
||||
0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c,
|
||||
0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56,
|
||||
0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e,
|
||||
0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4,
|
||||
0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78,
|
||||
0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63,
|
||||
0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00,
|
||||
}
|
@ -0,0 +1,117 @@
|
||||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option cc_enable_arenas = true;
|
||||
option go_package = "github.com/golang/protobuf/ptypes/duration";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "DurationProto";
|
||||
option java_multiple_files = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
|
||||
// A Duration represents a signed, fixed-length span of time represented
|
||||
// as a count of seconds and fractions of seconds at nanosecond
|
||||
// resolution. It is independent of any calendar and concepts like "day"
|
||||
// or "month". It is related to Timestamp in that the difference between
|
||||
// two Timestamp values is a Duration and it can be added or subtracted
|
||||
// from a Timestamp. Range is approximately +-10,000 years.
|
||||
//
|
||||
// # Examples
|
||||
//
|
||||
// Example 1: Compute Duration from two Timestamps in pseudo code.
|
||||
//
|
||||
// Timestamp start = ...;
|
||||
// Timestamp end = ...;
|
||||
// Duration duration = ...;
|
||||
//
|
||||
// duration.seconds = end.seconds - start.seconds;
|
||||
// duration.nanos = end.nanos - start.nanos;
|
||||
//
|
||||
// if (duration.seconds < 0 && duration.nanos > 0) {
|
||||
// duration.seconds += 1;
|
||||
// duration.nanos -= 1000000000;
|
||||
// } else if (duration.seconds > 0 && duration.nanos < 0) {
|
||||
// duration.seconds -= 1;
|
||||
// duration.nanos += 1000000000;
|
||||
// }
|
||||
//
|
||||
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
|
||||
//
|
||||
// Timestamp start = ...;
|
||||
// Duration duration = ...;
|
||||
// Timestamp end = ...;
|
||||
//
|
||||
// end.seconds = start.seconds + duration.seconds;
|
||||
// end.nanos = start.nanos + duration.nanos;
|
||||
//
|
||||
// if (end.nanos < 0) {
|
||||
// end.seconds -= 1;
|
||||
// end.nanos += 1000000000;
|
||||
// } else if (end.nanos >= 1000000000) {
|
||||
// end.seconds += 1;
|
||||
// end.nanos -= 1000000000;
|
||||
// }
|
||||
//
|
||||
// Example 3: Compute Duration from datetime.timedelta in Python.
|
||||
//
|
||||
// td = datetime.timedelta(days=3, minutes=10)
|
||||
// duration = Duration()
|
||||
// duration.FromTimedelta(td)
|
||||
//
|
||||
// # JSON Mapping
|
||||
//
|
||||
// In JSON format, the Duration type is encoded as a string rather than an
|
||||
// object, where the string ends in the suffix "s" (indicating seconds) and
|
||||
// is preceded by the number of seconds, with nanoseconds expressed as
|
||||
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
|
||||
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
|
||||
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
|
||||
// microsecond should be expressed in JSON format as "3.000001s".
|
||||
//
|
||||
//
|
||||
message Duration {
|
||||
|
||||
// Signed seconds of the span of time. Must be from -315,576,000,000
|
||||
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
|
||||
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
|
||||
int64 seconds = 1;
|
||||
|
||||
// Signed fractions of a second at nanosecond resolution of the span
|
||||
// of time. Durations less than one second are represented with a 0
|
||||
// `seconds` field and a positive or negative `nanos` field. For durations
|
||||
// of one second or more, a non-zero value for the `nanos` field must be
|
||||
// of the same sign as the `seconds` field. Must be from -999,999,999
|
||||
// to +999,999,999 inclusive.
|
||||
int32 nanos = 2;
|
||||
}
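The JSON mapping described above ("3s", "3.000000001s", ...) can be observed from Go with the jsonpb marshaler that accompanies this protobuf version; a small sketch, assuming the jsonpb package from the same module is available:

package main

import (
	"fmt"

	"github.com/golang/protobuf/jsonpb"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	m := &jsonpb.Marshaler{}
	// Well-known types use their custom JSON form: a string with an "s" suffix.
	s, err := m.MarshalToString(&durpb.Duration{Seconds: 3, Nanos: 1})
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // "3.000000001s"
}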
|
@ -0,0 +1,83 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/protobuf/empty.proto
|
||||
|
||||
package empty
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// A generic empty message that you can re-use to avoid defining duplicated
|
||||
// empty messages in your APIs. A typical example is to use it as the request
|
||||
// or the response type of an API method. For instance:
|
||||
//
|
||||
// service Foo {
|
||||
// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
|
||||
// }
|
||||
//
|
||||
// The JSON representation for `Empty` is an empty JSON object `{}`.
|
||||
type Empty struct {
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Empty) Reset() { *m = Empty{} }
|
||||
func (m *Empty) String() string { return proto.CompactTextString(m) }
|
||||
func (*Empty) ProtoMessage() {}
|
||||
func (*Empty) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_900544acb223d5b8, []int{0}
|
||||
}
|
||||
|
||||
func (*Empty) XXX_WellKnownType() string { return "Empty" }
|
||||
|
||||
func (m *Empty) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Empty.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Empty.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Empty) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Empty.Merge(m, src)
|
||||
}
|
||||
func (m *Empty) XXX_Size() int {
|
||||
return xxx_messageInfo_Empty.Size(m)
|
||||
}
|
||||
func (m *Empty) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Empty.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Empty proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Empty)(nil), "google.protobuf.Empty")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_900544acb223d5b8) }
|
||||
|
||||
var fileDescriptor_900544acb223d5b8 = []byte{
|
||||
// 148 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f,
|
||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28,
|
||||
0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57,
|
||||
0x90, 0xbc, 0x53, 0x19, 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36,
|
||||
0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf,
|
||||
0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0x47, 0x58, 0x53, 0x50, 0x52, 0x59, 0x90, 0x5a, 0x0c,
|
||||
0xb1, 0xed, 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10,
|
||||
0x13, 0x03, 0xa0, 0xea, 0xf4, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40,
|
||||
0xea, 0x93, 0xd8, 0xc0, 0x06, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x64, 0xd4, 0xb3, 0xa6,
|
||||
0xb7, 0x00, 0x00, 0x00,
|
||||
}
|
@ -0,0 +1,52 @@
|
||||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option go_package = "github.com/golang/protobuf/ptypes/empty";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "EmptyProto";
|
||||
option java_multiple_files = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
option cc_enable_arenas = true;
|
||||
|
||||
// A generic empty message that you can re-use to avoid defining duplicated
|
||||
// empty messages in your APIs. A typical example is to use it as the request
|
||||
// or the response type of an API method. For instance:
|
||||
//
|
||||
// service Foo {
|
||||
// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
|
||||
// }
|
||||
//
|
||||
// The JSON representation for `Empty` is an empty JSON object `{}`.
|
||||
message Empty {}
|
@ -0,0 +1,132 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package ptypes
|
||||
|
||||
// This file implements operations on google.protobuf.Timestamp.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
tspb "github.com/golang/protobuf/ptypes/timestamp"
|
||||
)
|
||||
|
||||
const (
|
||||
// Seconds field of the earliest valid Timestamp.
|
||||
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
||||
minValidSeconds = -62135596800
|
||||
// Seconds field just after the latest valid Timestamp.
|
||||
// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
||||
maxValidSeconds = 253402300800
|
||||
)
|
||||
|
||||
// validateTimestamp determines whether a Timestamp is valid.
|
||||
// A valid timestamp represents a time in the range
|
||||
// [0001-01-01, 10000-01-01) and has a Nanos field
|
||||
// in the range [0, 1e9).
|
||||
//
|
||||
// If the Timestamp is valid, validateTimestamp returns nil.
|
||||
// Otherwise, it returns an error that describes
|
||||
// the problem.
|
||||
//
|
||||
// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
|
||||
func validateTimestamp(ts *tspb.Timestamp) error {
|
||||
if ts == nil {
|
||||
return errors.New("timestamp: nil Timestamp")
|
||||
}
|
||||
if ts.Seconds < minValidSeconds {
|
||||
return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
|
||||
}
|
||||
if ts.Seconds >= maxValidSeconds {
|
||||
return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
|
||||
}
|
||||
if ts.Nanos < 0 || ts.Nanos >= 1e9 {
|
||||
return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
|
||||
// It returns an error if the argument is invalid.
|
||||
//
|
||||
// Unlike most Go functions, if Timestamp returns an error, the first return value
|
||||
// is not the zero time.Time. Instead, it is the value obtained from the
|
||||
// time.Unix function when passed the contents of the Timestamp, in the UTC
|
||||
// locale. This may or may not be a meaningful time; many invalid Timestamps
|
||||
// do map to valid time.Times.
|
||||
//
|
||||
// A nil Timestamp returns an error. The first return value in that case is
|
||||
// undefined.
|
||||
func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
|
||||
// Don't return the zero value on error, because it corresponds to a valid
|
||||
// timestamp. Instead return whatever time.Unix gives us.
|
||||
var t time.Time
|
||||
if ts == nil {
|
||||
t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
|
||||
} else {
|
||||
t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
|
||||
}
|
||||
return t, validateTimestamp(ts)
|
||||
}
|
||||
|
||||
// TimestampNow returns a google.protobuf.Timestamp for the current time.
|
||||
func TimestampNow() *tspb.Timestamp {
|
||||
ts, err := TimestampProto(time.Now())
|
||||
if err != nil {
|
||||
panic("ptypes: time.Now() out of Timestamp range")
|
||||
}
|
||||
return ts
|
||||
}
|
||||
|
||||
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
|
||||
// It returns an error if the resulting Timestamp is invalid.
|
||||
func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
|
||||
ts := &tspb.Timestamp{
|
||||
Seconds: t.Unix(),
|
||||
Nanos: int32(t.Nanosecond()),
|
||||
}
|
||||
if err := validateTimestamp(ts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ts, nil
|
||||
}
|
||||
|
||||
// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
|
||||
// Timestamps, it returns an error message in parentheses.
|
||||
func TimestampString(ts *tspb.Timestamp) string {
|
||||
t, err := Timestamp(ts)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("(%v)", err)
|
||||
}
|
||||
return t.Format(time.RFC3339Nano)
|
||||
}
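A short usage sketch for the timestamp helpers above, using only the functions defined in this file:

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// Current time as a google.protobuf.Timestamp.
	now := ptypes.TimestampNow()

	// Timestamp -> time.Time (validated).
	t, err := ptypes.Timestamp(now)
	if err != nil {
		panic(err)
	}
	fmt.Println(t.UTC())

	// time.Time -> Timestamp, then back to an RFC 3339 string.
	ts, err := ptypes.TimestampProto(time.Date(2017, 1, 15, 1, 30, 15, 10000000, time.UTC))
	if err != nil {
		panic(err)
	}
	fmt.Println(ptypes.TimestampString(ts)) // 2017-01-15T01:30:15.01Z
}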
|
@ -0,0 +1,179 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/protobuf/timestamp.proto
|
||||
|
||||
package timestamp
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// A Timestamp represents a point in time independent of any time zone
|
||||
// or calendar, represented as seconds and fractions of seconds at
|
||||
// nanosecond resolution in UTC Epoch time. It is encoded using the
|
||||
// Proleptic Gregorian Calendar which extends the Gregorian calendar
|
||||
// backwards to year one. It is encoded assuming all minutes are 60
|
||||
// seconds long, i.e. leap seconds are "smeared" so that no leap second
|
||||
// table is needed for interpretation. Range is from
|
||||
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
|
||||
// By restricting to that range, we ensure that we can convert to
|
||||
// and from RFC 3339 date strings.
|
||||
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
|
||||
//
|
||||
// # Examples
|
||||
//
|
||||
// Example 1: Compute Timestamp from POSIX `time()`.
|
||||
//
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds(time(NULL));
|
||||
// timestamp.set_nanos(0);
|
||||
//
|
||||
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
|
||||
//
|
||||
// struct timeval tv;
|
||||
// gettimeofday(&tv, NULL);
|
||||
//
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds(tv.tv_sec);
|
||||
// timestamp.set_nanos(tv.tv_usec * 1000);
|
||||
//
|
||||
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
|
||||
//
|
||||
// FILETIME ft;
|
||||
// GetSystemTimeAsFileTime(&ft);
|
||||
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
|
||||
//
|
||||
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
|
||||
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
|
||||
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
|
||||
//
|
||||
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
|
||||
//
|
||||
// long millis = System.currentTimeMillis();
|
||||
//
|
||||
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
|
||||
// .setNanos((int) ((millis % 1000) * 1000000)).build();
|
||||
//
|
||||
//
|
||||
// Example 5: Compute Timestamp from current time in Python.
|
||||
//
|
||||
// timestamp = Timestamp()
|
||||
// timestamp.GetCurrentTime()
|
||||
//
|
||||
// # JSON Mapping
|
||||
//
|
||||
// In JSON format, the Timestamp type is encoded as a string in the
|
||||
// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
|
||||
// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
|
||||
// where {year} is always expressed using four digits while {month}, {day},
|
||||
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
|
||||
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
|
||||
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
|
||||
// is required. A proto3 JSON serializer should always use UTC (as indicated by
|
||||
// "Z") when printing the Timestamp type and a proto3 JSON parser should be
|
||||
// able to accept both UTC and other timezones (as indicated by an offset).
|
||||
//
|
||||
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
|
||||
// 01:30 UTC on January 15, 2017.
|
||||
//
|
||||
// In JavaScript, one can convert a Date object to this format using the
|
||||
// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
|
||||
// method. In Python, a standard `datetime.datetime` object can be converted
|
||||
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
|
||||
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
|
||||
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
|
||||
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
|
||||
// ) to obtain a formatter capable of generating timestamps in this format.
|
||||
//
|
||||
//
|
||||
type Timestamp struct {
|
||||
// Represents seconds of UTC time since Unix epoch
|
||||
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
|
||||
// 9999-12-31T23:59:59Z inclusive.
|
||||
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
|
||||
// Non-negative fractions of a second at nanosecond resolution. Negative
|
||||
// second values with fractions must still have non-negative nanos values
|
||||
// that count forward in time. Must be from 0 to 999,999,999
|
||||
// inclusive.
|
||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Timestamp) Reset() { *m = Timestamp{} }
|
||||
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
|
||||
func (*Timestamp) ProtoMessage() {}
|
||||
func (*Timestamp) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_292007bbfe81227e, []int{0}
|
||||
}
|
||||
|
||||
func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
|
||||
|
||||
func (m *Timestamp) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Timestamp.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Timestamp) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Timestamp.Merge(m, src)
|
||||
}
|
||||
func (m *Timestamp) XXX_Size() int {
|
||||
return xxx_messageInfo_Timestamp.Size(m)
|
||||
}
|
||||
func (m *Timestamp) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Timestamp.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Timestamp proto.InternalMessageInfo
|
||||
|
||||
func (m *Timestamp) GetSeconds() int64 {
|
||||
if m != nil {
|
||||
return m.Seconds
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Timestamp) GetNanos() int32 {
|
||||
if m != nil {
|
||||
return m.Nanos
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) }
|
||||
|
||||
var fileDescriptor_292007bbfe81227e = []byte{
|
||||
// 191 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
|
||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
|
||||
0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
|
||||
0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
|
||||
0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
|
||||
0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70,
|
||||
0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51,
|
||||
0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89,
|
||||
0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71,
|
||||
0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a,
|
||||
0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43,
|
||||
0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00,
|
||||
}
|
@ -0,0 +1,135 @@
|
||||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option cc_enable_arenas = true;
|
||||
option go_package = "github.com/golang/protobuf/ptypes/timestamp";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "TimestampProto";
|
||||
option java_multiple_files = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
|
||||
// A Timestamp represents a point in time independent of any time zone
|
||||
// or calendar, represented as seconds and fractions of seconds at
|
||||
// nanosecond resolution in UTC Epoch time. It is encoded using the
|
||||
// Proleptic Gregorian Calendar which extends the Gregorian calendar
|
||||
// backwards to year one. It is encoded assuming all minutes are 60
|
||||
// seconds long, i.e. leap seconds are "smeared" so that no leap second
|
||||
// table is needed for interpretation. Range is from
|
||||
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
|
||||
// By restricting to that range, we ensure that we can convert to
|
||||
// and from RFC 3339 date strings.
|
||||
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
|
||||
//
|
||||
// # Examples
|
||||
//
|
||||
// Example 1: Compute Timestamp from POSIX `time()`.
|
||||
//
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds(time(NULL));
|
||||
// timestamp.set_nanos(0);
|
||||
//
|
||||
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
|
||||
//
|
||||
// struct timeval tv;
|
||||
// gettimeofday(&tv, NULL);
|
||||
//
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds(tv.tv_sec);
|
||||
// timestamp.set_nanos(tv.tv_usec * 1000);
|
||||
//
|
||||
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
|
||||
//
|
||||
// FILETIME ft;
|
||||
// GetSystemTimeAsFileTime(&ft);
|
||||
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
|
||||
//
|
||||
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
|
||||
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
|
||||
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
|
||||
//
|
||||
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
|
||||
//
|
||||
// long millis = System.currentTimeMillis();
|
||||
//
|
||||
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
|
||||
// .setNanos((int) ((millis % 1000) * 1000000)).build();
|
||||
//
|
||||
//
|
||||
// Example 5: Compute Timestamp from current time in Python.
|
||||
//
|
||||
// timestamp = Timestamp()
|
||||
// timestamp.GetCurrentTime()
|
||||
//
|
||||
// # JSON Mapping
|
||||
//
|
||||
// In JSON format, the Timestamp type is encoded as a string in the
|
||||
// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
|
||||
// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
|
||||
// where {year} is always expressed using four digits while {month}, {day},
|
||||
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
|
||||
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
|
||||
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
|
||||
// is required. A proto3 JSON serializer should always use UTC (as indicated by
|
||||
// "Z") when printing the Timestamp type and a proto3 JSON parser should be
|
||||
// able to accept both UTC and other timezones (as indicated by an offset).
|
||||
//
|
||||
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
|
||||
// 01:30 UTC on January 15, 2017.
|
||||
//
|
||||
// In JavaScript, one can convert a Date object to this format using the
|
||||
// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
|
||||
// method. In Python, a standard `datetime.datetime` object can be converted
|
||||
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
|
||||
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
|
||||
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
|
||||
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
|
||||
// ) to obtain a formatter capable of generating timestamps in this format.
|
||||
//
|
||||
//
|
||||
message Timestamp {
|
||||
|
||||
// Represents seconds of UTC time since Unix epoch
|
||||
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
|
||||
// 9999-12-31T23:59:59Z inclusive.
|
||||
int64 seconds = 1;
|
||||
|
||||
// Non-negative fractions of a second at nanosecond resolution. Negative
|
||||
// second values with fractions must still have non-negative nanos values
|
||||
// that count forward in time. Must be from 0 to 999,999,999
|
||||
// inclusive.
|
||||
int32 nanos = 2;
|
||||
}
|
@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
79
chaincode/vendor/github.com/hyperledger/fabric-chaincode-go/shim/chaincodeserver.go
generated
vendored
79
chaincode/vendor/github.com/hyperledger/fabric-chaincode-go/shim/chaincodeserver.go
generated
vendored
@ -0,0 +1,79 @@
|
||||
// Copyright the Hyperledger Fabric contributors. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package shim
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
|
||||
"github.com/hyperledger/fabric-chaincode-go/shim/internal"
|
||||
pb "github.com/hyperledger/fabric-protos-go/peer"
|
||||
|
||||
"google.golang.org/grpc/keepalive"
|
||||
)
|
||||
|
||||
// TLSProperties passed to ChaincodeServer
|
||||
type TLSProperties struct {
|
||||
//Disabled forces default to be TLS enabled
|
||||
Disabled bool
|
||||
Key []byte
|
||||
Cert []byte
|
||||
// ClientCACerts set if connecting peer should be verified
|
||||
ClientCACerts []byte
|
||||
}
|
||||
|
||||
// ChaincodeServer encapsulates basic properties needed for a chaincode server
|
||||
type ChaincodeServer struct {
|
||||
// CCID should match chaincode's package name on peer
|
||||
CCID string
|
||||
// Addesss is the listen address of the chaincode server
|
||||
Address string
|
||||
// CC is the chaincode that handles Init and Invoke
|
||||
CC Chaincode
|
||||
// TLSProps is the TLS properties passed to chaincode server
|
||||
TLSProps TLSProperties
|
||||
// KaOpts keepalive options, sensible defaults provided if nil
|
||||
KaOpts *keepalive.ServerParameters
|
||||
}
|
||||
|
||||
// Connect the bidi stream entry point called by chaincode to register with the Peer.
|
||||
func (cs *ChaincodeServer) Connect(stream pb.Chaincode_ConnectServer) error {
|
||||
return chatWithPeer(cs.CCID, stream, cs.CC)
|
||||
}
|
||||
|
||||
// Start the server
|
||||
func (cs *ChaincodeServer) Start() error {
|
||||
if cs.CCID == "" {
|
||||
return errors.New("ccid must be specified")
|
||||
}
|
||||
|
||||
if cs.Address == "" {
|
||||
return errors.New("address must be specified")
|
||||
}
|
||||
|
||||
if cs.CC == nil {
|
||||
return errors.New("chaincode must be specified")
|
||||
}
|
||||
|
||||
var tlsCfg *tls.Config
|
||||
var err error
|
||||
if !cs.TLSProps.Disabled {
|
||||
tlsCfg, err = internal.LoadTLSConfig(true, cs.TLSProps.Key, cs.TLSProps.Cert, cs.TLSProps.ClientCACerts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// create listener and grpc server
|
||||
server, err := internal.NewServer(cs.Address, tlsCfg, cs.KaOpts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// register the server with grpc ...
|
||||
pb.RegisterChaincodeServer(server.Server, cs)
|
||||
|
||||
// ... and start
|
||||
return server.Start()
|
||||
}
|
@ -0,0 +1,685 @@
|
||||
// Copyright the Hyperledger Fabric contributors. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package shim
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
pb "github.com/hyperledger/fabric-protos-go/peer"
|
||||
)
|
||||
|
||||
type state string
|
||||
|
||||
const (
|
||||
created state = "created" // start state
|
||||
established state = "established" // connection established
|
||||
ready state = "ready" // ready for requests
|
||||
)
|
||||
|
||||
// PeerChaincodeStream is the common stream interface for Peer - chaincode communication.
|
||||
// Both chaincode-as-server and chaincode-as-client patterns need to support this
|
||||
type PeerChaincodeStream interface {
|
||||
Send(*pb.ChaincodeMessage) error
|
||||
Recv() (*pb.ChaincodeMessage, error)
|
||||
}
|
||||
|
||||
// ClientStream supports the (original) chaincode-as-client interaction pattern
|
||||
type ClientStream interface {
|
||||
PeerChaincodeStream
|
||||
CloseSend() error
|
||||
}
|
||||
|
||||
// Handler handler implementation for shim side of chaincode.
|
||||
type Handler struct {
|
||||
// serialLock is used to prevent concurrent calls to Send on the
|
||||
// PeerChaincodeStream. This is required by gRPC.
|
||||
serialLock sync.Mutex
|
||||
// chatStream is the client used to access the chaincode support server on
|
||||
// the peer.
|
||||
chatStream PeerChaincodeStream
|
||||
|
||||
// cc is the chaincode associated with this handler.
|
||||
cc Chaincode
|
||||
// state holds the current state of this handler.
|
||||
state state
|
||||
|
||||
// Multiple queries (and one transaction) with different txids can be executing in parallel for this chaincode
|
||||
// responseChannels is the channel on which responses are communicated by the shim to the chaincodeStub.
|
||||
// need lock to protect chaincode from attempting
|
||||
// concurrent requests to the peer
|
||||
responseChannelsMutex sync.Mutex
|
||||
responseChannels map[string]chan pb.ChaincodeMessage
|
||||
}
|
||||
|
||||
func shorttxid(txid string) string {
|
||||
if len(txid) < 8 {
|
||||
return txid
|
||||
}
|
||||
return txid[0:8]
|
||||
}
|
||||
|
||||
// serialSend serializes calls to Send on the gRPC client.
|
||||
func (h *Handler) serialSend(msg *pb.ChaincodeMessage) error {
|
||||
h.serialLock.Lock()
|
||||
defer h.serialLock.Unlock()
|
||||
|
||||
return h.chatStream.Send(msg)
|
||||
}
|
||||
|
||||
// serialSendAsync sends the provided message asynchronously in a separate
|
||||
// goroutine. The result of the send is communicated back to the caller via
|
||||
// errc.
|
||||
func (h *Handler) serialSendAsync(msg *pb.ChaincodeMessage, errc chan<- error) {
|
||||
go func() {
|
||||
errc <- h.serialSend(msg)
|
||||
}()
|
||||
}
|
||||
|
||||
// transactionContextID builds a transaction context identifier by
|
||||
// concatenating a channel ID and a transaction ID.
|
||||
func transactionContextID(chainID, txid string) string {
|
||||
return chainID + txid
|
||||
}
|
||||
|
||||
func (h *Handler) createResponseChannel(channelID, txid string) (<-chan pb.ChaincodeMessage, error) {
|
||||
h.responseChannelsMutex.Lock()
|
||||
defer h.responseChannelsMutex.Unlock()
|
||||
|
||||
if h.responseChannels == nil {
|
||||
return nil, fmt.Errorf("[%s] cannot create response channel", shorttxid(txid))
|
||||
}
|
||||
|
||||
txCtxID := transactionContextID(channelID, txid)
|
||||
if h.responseChannels[txCtxID] != nil {
|
||||
return nil, fmt.Errorf("[%s] channel exists", shorttxid(txCtxID))
|
||||
}
|
||||
|
||||
responseChan := make(chan pb.ChaincodeMessage)
|
||||
h.responseChannels[txCtxID] = responseChan
|
||||
return responseChan, nil
|
||||
}
|
||||
|
||||
func (h *Handler) deleteResponseChannel(channelID, txid string) {
|
||||
h.responseChannelsMutex.Lock()
|
||||
defer h.responseChannelsMutex.Unlock()
|
||||
if h.responseChannels != nil {
|
||||
txCtxID := transactionContextID(channelID, txid)
|
||||
delete(h.responseChannels, txCtxID)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Handler) handleResponse(msg *pb.ChaincodeMessage) error {
|
||||
h.responseChannelsMutex.Lock()
|
||||
defer h.responseChannelsMutex.Unlock()
|
||||
|
||||
if h.responseChannels == nil {
|
||||
return fmt.Errorf("[%s] Cannot send message response channel", shorttxid(msg.Txid))
|
||||
}
|
||||
|
||||
txCtxID := transactionContextID(msg.ChannelId, msg.Txid)
|
||||
responseCh := h.responseChannels[txCtxID]
|
||||
if responseCh == nil {
|
||||
return fmt.Errorf("[%s] responseChannel does not exist", shorttxid(msg.Txid))
|
||||
}
|
||||
responseCh <- *msg
|
||||
return nil
|
||||
}
|
||||
|
||||
// sendReceive sends msg to the peer and waits for the response to arrive on
|
||||
// the provided responseChan. On success, the response message will be
|
||||
// returned. An error will be returned msg was not successfully sent to the
|
||||
// peer.
|
||||
func (h *Handler) sendReceive(msg *pb.ChaincodeMessage, responseChan <-chan pb.ChaincodeMessage) (pb.ChaincodeMessage, error) {
|
||||
err := h.serialSend(msg)
|
||||
if err != nil {
|
||||
return pb.ChaincodeMessage{}, err
|
||||
}
|
||||
|
||||
outmsg := <-responseChan
|
||||
return outmsg, nil
|
||||
}
|
||||
|
||||
// NewChaincodeHandler returns a new instance of the shim side handler.
|
||||
func newChaincodeHandler(peerChatStream PeerChaincodeStream, chaincode Chaincode) *Handler {
|
||||
return &Handler{
|
||||
chatStream: peerChatStream,
|
||||
cc: chaincode,
|
||||
responseChannels: map[string]chan pb.ChaincodeMessage{},
|
||||
state: created,
|
||||
}
|
||||
}
|
||||
|
||||
type stubHandlerFunc func(*pb.ChaincodeMessage) (*pb.ChaincodeMessage, error)
|
||||
|
||||
func (h *Handler) handleStubInteraction(handler stubHandlerFunc, msg *pb.ChaincodeMessage, errc chan<- error) {
|
||||
resp, err := handler(msg)
|
||||
if err != nil {
|
||||
resp = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: []byte(err.Error()), Txid: msg.Txid, ChannelId: msg.ChannelId}
|
||||
}
|
||||
h.serialSendAsync(resp, errc)
|
||||
}
|
||||
|
||||
// handleInit calls the Init function of the associated chaincode.
|
||||
func (h *Handler) handleInit(msg *pb.ChaincodeMessage) (*pb.ChaincodeMessage, error) {
|
||||
// Get the function and args from Payload
|
||||
input := &pb.ChaincodeInput{}
|
||||
err := proto.Unmarshal(msg.Payload, input)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal input: %s", err)
|
||||
}
|
||||
|
||||
// Create the ChaincodeStub which the chaincode can use to callback
|
||||
stub, err := newChaincodeStub(h, msg.ChannelId, msg.Txid, input, msg.Proposal)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create new ChaincodeStub: %s", err)
|
||||
}
|
||||
|
||||
res := h.cc.Init(stub)
|
||||
if res.Status >= ERROR {
|
||||
return &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: []byte(res.Message), Txid: msg.Txid, ChaincodeEvent: stub.chaincodeEvent, ChannelId: msg.ChannelId}, nil
|
||||
}
|
||||
|
||||
resBytes, err := proto.Marshal(&res)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal response: %s", err)
|
||||
}
|
||||
|
||||
return &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_COMPLETED, Payload: resBytes, Txid: msg.Txid, ChaincodeEvent: stub.chaincodeEvent, ChannelId: stub.ChannelID}, nil
|
||||
}
|
||||
|
||||
// handleTransaction calls Invoke on the associated chaincode.
|
||||
func (h *Handler) handleTransaction(msg *pb.ChaincodeMessage) (*pb.ChaincodeMessage, error) {
|
||||
// Get the function and args from Payload
|
||||
input := &pb.ChaincodeInput{}
|
||||
err := proto.Unmarshal(msg.Payload, input)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal input: %s", err)
|
||||
}
|
||||
|
||||
// Create the ChaincodeStub which the chaincode can use to callback
|
||||
stub, err := newChaincodeStub(h, msg.ChannelId, msg.Txid, input, msg.Proposal)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create new ChaincodeStub: %s", err)
|
||||
}
|
||||
|
||||
res := h.cc.Invoke(stub)
|
||||
|
||||
// Endorser will handle error contained in Response.
|
||||
resBytes, err := proto.Marshal(&res)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal response: %s", err)
|
||||
}
|
||||
|
||||
return &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_COMPLETED, Payload: resBytes, Txid: msg.Txid, ChaincodeEvent: stub.chaincodeEvent, ChannelId: stub.ChannelID}, nil
|
||||
}
|
||||
|
||||
// callPeerWithChaincodeMsg sends a chaincode message to the peer for the given
|
||||
// txid and channel and receives the response.
|
||||
func (h *Handler) callPeerWithChaincodeMsg(msg *pb.ChaincodeMessage, channelID, txid string) (pb.ChaincodeMessage, error) {
|
||||
// Create the channel on which to communicate the response from the peer
|
||||
respChan, err := h.createResponseChannel(channelID, txid)
|
||||
if err != nil {
|
||||
return pb.ChaincodeMessage{}, err
|
||||
}
|
||||
defer h.deleteResponseChannel(channelID, txid)
|
||||
|
||||
return h.sendReceive(msg, respChan)
|
||||
}
|
||||
|
||||
// handleGetState communicates with the peer to fetch the requested state information from the ledger.
|
||||
func (h *Handler) handleGetState(collection string, key string, channelID string, txid string) ([]byte, error) {
|
||||
// Construct payload for GET_STATE
|
||||
payloadBytes := marshalOrPanic(&pb.GetState{Collection: collection, Key: key})
|
||||
|
||||
msg := &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_GET_STATE, Payload: payloadBytes, Txid: txid, ChannelId: channelID}
|
||||
responseMsg, err := h.callPeerWithChaincodeMsg(msg, channelID, txid)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("[%s] error sending %s: %s", shorttxid(txid), pb.ChaincodeMessage_GET_STATE, err)
|
||||
}
|
||||
|
||||
if responseMsg.Type == pb.ChaincodeMessage_RESPONSE {
|
||||
// Success response
|
||||
return responseMsg.Payload, nil
|
||||
}
|
||||
if responseMsg.Type == pb.ChaincodeMessage_ERROR {
|
||||
// Error response
|
||||
return nil, fmt.Errorf("%s", responseMsg.Payload[:])
|
||||
}
|
||||
|
||||
// Incorrect chaincode message received
|
||||
return nil, fmt.Errorf("[%s] incorrect chaincode message %s received. Expecting %s or %s", shorttxid(responseMsg.Txid), responseMsg.Type, pb.ChaincodeMessage_RESPONSE, pb.ChaincodeMessage_ERROR)
|
||||
}
|
||||
|
||||
func (h *Handler) handleGetPrivateDataHash(collection string, key string, channelID string, txid string) ([]byte, error) {
|
||||
// Construct payload for GET_PRIVATE_DATA_HASH
|
||||
payloadBytes := marshalOrPanic(&pb.GetState{Collection: collection, Key: key})
|
||||
|
||||
msg := &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_GET_PRIVATE_DATA_HASH, Payload: payloadBytes, Txid: txid, ChannelId: channelID}
|
||||
responseMsg, err := h.callPeerWithChaincodeMsg(msg, channelID, txid)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("[%s] error sending %s: %s", shorttxid(txid), pb.ChaincodeMessage_GET_PRIVATE_DATA_HASH, err)
|
||||
}
|
||||
|
||||
if responseMsg.Type == pb.ChaincodeMessage_RESPONSE {
|
||||
// Success response
|
||||
return responseMsg.Payload, nil
|
||||
}
|
||||
if responseMsg.Type == pb.ChaincodeMessage_ERROR {
|
||||
// Error response
|
||||
return nil, fmt.Errorf("%s", responseMsg.Payload[:])
|
||||
}
|
||||
|
||||
// Incorrect chaincode message received
|
||||
return nil, fmt.Errorf("[%s] incorrect chaincode message %s received. Expecting %s or %s", shorttxid(responseMsg.Txid), responseMsg.Type, pb.ChaincodeMessage_RESPONSE, pb.ChaincodeMessage_ERROR)
|
||||
}
|
||||
|
||||
func (h *Handler) handleGetStateMetadata(collection string, key string, channelID string, txID string) (map[string][]byte, error) {
|
||||
// Construct payload for GET_STATE_METADATA
|
||||
payloadBytes := marshalOrPanic(&pb.GetStateMetadata{Collection: collection, Key: key})
|
||||
|
||||
msg := &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_GET_STATE_METADATA, Payload: payloadBytes, Txid: txID, ChannelId: channelID}
|
||||
responseMsg, err := h.callPeerWithChaincodeMsg(msg, channelID, txID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("[%s] error sending %s: %s", shorttxid(txID), pb.ChaincodeMessage_GET_STATE_METADATA, err)
|
||||
}
|
||||
|
||||
if responseMsg.Type == pb.ChaincodeMessage_RESPONSE {
|
||||
// Success response
|
||||
var mdResult pb.StateMetadataResult
|
||||
err := proto.Unmarshal(responseMsg.Payload, &mdResult)
|
||||
if err != nil {
|
||||
return nil, errors.New("Could not unmarshal metadata response")
|
||||
}
|
||||
metadata := make(map[string][]byte)
|
||||
for _, md := range mdResult.Entries {
|
||||
metadata[md.Metakey] = md.Value
|
||||
}
|
||||
|
||||
return metadata, nil
|
||||
}
|
||||
if responseMsg.Type == pb.ChaincodeMessage_ERROR {
|
||||
// Error response
|
||||
return nil, fmt.Errorf("%s", responseMsg.Payload[:])
|
||||
}
|
||||
|
||||
// Incorrect chaincode message received
|
||||
return nil, fmt.Errorf("[%s] incorrect chaincode message %s received. Expecting %s or %s", shorttxid(responseMsg.Txid), responseMsg.Type, pb.ChaincodeMessage_RESPONSE, pb.ChaincodeMessage_ERROR)
|
||||
}
|
||||
|
||||
// handlePutState communicates with the peer to put state information into the ledger.
|
||||
func (h *Handler) handlePutState(collection string, key string, value []byte, channelID string, txid string) error {
|
||||
// Construct payload for PUT_STATE
|
||||
payloadBytes := marshalOrPanic(&pb.PutState{Collection: collection, Key: key, Value: value})
|
||||
|
||||
msg := &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_PUT_STATE, Payload: payloadBytes, Txid: txid, ChannelId: channelID}
|
||||
|
||||
// Execute the request and get response
|
||||
responseMsg, err := h.callPeerWithChaincodeMsg(msg, channelID, txid)
|
||||
if err != nil {
|
||||
return fmt.Errorf("[%s] error sending %s: %s", msg.Txid, pb.ChaincodeMessage_PUT_STATE, err)
|
||||
}
|
||||
|
||||
if responseMsg.Type == pb.ChaincodeMessage_RESPONSE {
|
||||
// Success response
|
||||
return nil
|
||||
}
|
||||
|
||||
if responseMsg.Type == pb.ChaincodeMessage_ERROR {
|
||||
// Error response
|
||||
return fmt.Errorf("%s", responseMsg.Payload[:])
|
||||
}
|
||||
|
||||
// Incorrect chaincode message received
|
||||
return fmt.Errorf("[%s] incorrect chaincode message %s received. Expecting %s or %s", shorttxid(responseMsg.Txid), responseMsg.Type, pb.ChaincodeMessage_RESPONSE, pb.ChaincodeMessage_ERROR)
|
||||
}
|
||||
|
||||
func (h *Handler) handlePutStateMetadataEntry(collection string, key string, metakey string, metadata []byte, channelID string, txID string) error {
|
||||
// Construct payload for PUT_STATE_METADATA
|
||||
md := &pb.StateMetadata{Metakey: metakey, Value: metadata}
|
||||
payloadBytes := marshalOrPanic(&pb.PutStateMetadata{Collection: collection, Key: key, Metadata: md})
|
||||
|
||||
msg := &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_PUT_STATE_METADATA, Payload: payloadBytes, Txid: txID, ChannelId: channelID}
|
||||
// Execute the request and get response
|
||||
responseMsg, err := h.callPeerWithChaincodeMsg(msg, channelID, txID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("[%s] error sending %s: %s", msg.Txid, pb.ChaincodeMessage_PUT_STATE_METADATA, err)
|
||||
}
|
||||
|
||||
if responseMsg.Type == pb.ChaincodeMessage_RESPONSE {
|
||||
// Success response
|
||||
return nil
|
||||
}
|
||||
|
||||
if responseMsg.Type == pb.ChaincodeMessage_ERROR {
|
||||
// Error response
|
||||
return fmt.Errorf("%s", responseMsg.Payload[:])
|
||||
}
|
||||
|
||||
// Incorrect chaincode message received
|
||||
return fmt.Errorf("[%s]incorrect chaincode message %s received. Expecting %s or %s", shorttxid(responseMsg.Txid), responseMsg.Type, pb.ChaincodeMessage_RESPONSE, pb.ChaincodeMessage_ERROR)
|
||||
}
|
||||
|
||||
// handleDelState communicates with the peer to delete a key from the state in the ledger.
|
||||
func (h *Handler) handleDelState(collection string, key string, channelID string, txid string) error {
|
||||
payloadBytes := marshalOrPanic(&pb.DelState{Collection: collection, Key: key})
|
||||
msg := &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_DEL_STATE, Payload: payloadBytes, Txid: txid, ChannelId: channelID}
|
||||
// Execute the request and get response
|
||||
responseMsg, err := h.callPeerWithChaincodeMsg(msg, channelID, txid)
|
||||
if err != nil {
|
||||
return fmt.Errorf("[%s] error sending %s", shorttxid(msg.Txid), pb.ChaincodeMessage_DEL_STATE)
|
||||
}
|
||||
|
||||
if responseMsg.Type == pb.ChaincodeMessage_RESPONSE {
|
||||
// Success response
|
||||
return nil
|
||||
}
|
||||
if responseMsg.Type == pb.ChaincodeMessage_ERROR {
|
||||
// Error response
|
||||
return fmt.Errorf("%s", responseMsg.Payload[:])
|
||||
}
|
||||
|
||||
// Incorrect chaincode message received
|
||||
return fmt.Errorf("[%s] incorrect chaincode message %s received. Expecting %s or %s", shorttxid(responseMsg.Txid), responseMsg.Type, pb.ChaincodeMessage_RESPONSE, pb.ChaincodeMessage_ERROR)
|
||||
}
|
||||
|
||||
func (h *Handler) handleGetStateByRange(collection, startKey, endKey string, metadata []byte,
|
||||
channelID string, txid string) (*pb.QueryResponse, error) {
|
||||
// Send GET_STATE_BY_RANGE message to peer chaincode support
|
||||
payloadBytes := marshalOrPanic(&pb.GetStateByRange{Collection: collection, StartKey: startKey, EndKey: endKey, Metadata: metadata})
|
||||
msg := &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_GET_STATE_BY_RANGE, Payload: payloadBytes, Txid: txid, ChannelId: channelID}
|
||||
responseMsg, err := h.callPeerWithChaincodeMsg(msg, channelID, txid)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("[%s] error sending %s", shorttxid(msg.Txid), pb.ChaincodeMessage_GET_STATE_BY_RANGE)
|
||||
}
|
||||
|
||||
if responseMsg.Type == pb.ChaincodeMessage_RESPONSE {
|
||||
// Success response
|
||||
rangeQueryResponse := &pb.QueryResponse{}
|
||||
err = proto.Unmarshal(responseMsg.Payload, rangeQueryResponse)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("[%s] GetStateByRangeResponse unmarshall error", shorttxid(responseMsg.Txid))
|
||||
}
|
||||
|
||||
return rangeQueryResponse, nil
|
||||
}
|
||||
if responseMsg.Type == pb.ChaincodeMessage_ERROR {
|
||||
// Error response
|
||||
return nil, fmt.Errorf("%s", responseMsg.Payload[:])
|
||||
}
|
||||
|
||||
// Incorrect chaincode message received
|
||||
return nil, fmt.Errorf("incorrect chaincode message %s received. Expecting %s or %s", responseMsg.Type, pb.ChaincodeMessage_RESPONSE, pb.ChaincodeMessage_ERROR)
|
||||
}
|
||||
|
||||
func (h *Handler) handleQueryStateNext(id, channelID, txid string) (*pb.QueryResponse, error) {
|
||||
// Create the channel on which to communicate the response from validating peer
|
||||
respChan, err := h.createResponseChannel(channelID, txid)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer h.deleteResponseChannel(channelID, txid)
|
||||
|
||||
// Send QUERY_STATE_NEXT message to peer chaincode support
|
||||
payloadBytes := marshalOrPanic(&pb.QueryStateNext{Id: id})
|
||||
|
||||
msg := &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_QUERY_STATE_NEXT, Payload: payloadBytes, Txid: txid, ChannelId: channelID}
|
||||
|
||||
var responseMsg pb.ChaincodeMessage
|
||||
|
||||
if responseMsg, err = h.sendReceive(msg, respChan); err != nil {
|
||||
return nil, fmt.Errorf("[%s] error sending %s", shorttxid(msg.Txid), pb.ChaincodeMessage_QUERY_STATE_NEXT)
|
||||
}
|
||||
|
||||
if responseMsg.Type == pb.ChaincodeMessage_RESPONSE {
|
||||
// Success response
|
||||
queryResponse := &pb.QueryResponse{}
|
||||
if err = proto.Unmarshal(responseMsg.Payload, queryResponse); err != nil {
|
||||
return nil, fmt.Errorf("[%s] unmarshal error", shorttxid(responseMsg.Txid))
|
||||
}
|
||||
|
||||
return queryResponse, nil
|
||||
}
|
||||
if responseMsg.Type == pb.ChaincodeMessage_ERROR {
|
||||
// Error response
|
||||
return nil, fmt.Errorf("%s", responseMsg.Payload[:])
|
||||
}
|
||||
|
||||
// Incorrect chaincode message received
|
||||
return nil, fmt.Errorf("incorrect chaincode message %s received. Expecting %s or %s", responseMsg.Type, pb.ChaincodeMessage_RESPONSE, pb.ChaincodeMessage_ERROR)
|
||||
}
|
||||
|
||||
func (h *Handler) handleQueryStateClose(id, channelID, txid string) (*pb.QueryResponse, error) {
|
||||
// Create the channel on which to communicate the response from validating peer
|
||||
respChan, err := h.createResponseChannel(channelID, txid)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer h.deleteResponseChannel(channelID, txid)
|
||||
|
||||
// Send QUERY_STATE_CLOSE message to peer chaincode support
|
||||
payloadBytes := marshalOrPanic(&pb.QueryStateClose{Id: id})
|
||||
|
||||
msg := &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_QUERY_STATE_CLOSE, Payload: payloadBytes, Txid: txid, ChannelId: channelID}
|
||||
|
||||
var responseMsg pb.ChaincodeMessage
|
||||
|
||||
if responseMsg, err = h.sendReceive(msg, respChan); err != nil {
|
||||
return nil, fmt.Errorf("[%s] error sending %s", shorttxid(msg.Txid), pb.ChaincodeMessage_QUERY_STATE_CLOSE)
|
||||
}
|
||||
|
||||
if responseMsg.Type == pb.ChaincodeMessage_RESPONSE {
|
||||
// Success response
|
||||
queryResponse := &pb.QueryResponse{}
|
||||
if err = proto.Unmarshal(responseMsg.Payload, queryResponse); err != nil {
|
||||
return nil, fmt.Errorf("[%s] unmarshal error", shorttxid(responseMsg.Txid))
|
||||
}
|
||||
|
||||
return queryResponse, nil
|
||||
}
|
||||
if responseMsg.Type == pb.ChaincodeMessage_ERROR {
|
||||
// Error response
|
||||
return nil, fmt.Errorf("%s", responseMsg.Payload[:])
|
||||
}
|
||||
|
||||
// Incorrect chaincode message received
|
||||
return nil, fmt.Errorf("incorrect chaincode message %s received. Expecting %s or %s", responseMsg.Type, pb.ChaincodeMessage_RESPONSE, pb.ChaincodeMessage_ERROR)
|
||||
}
|
||||
|
||||
func (h *Handler) handleGetQueryResult(collection string, query string, metadata []byte,
|
||||
channelID string, txid string) (*pb.QueryResponse, error) {
|
||||
// Send GET_QUERY_RESULT message to peer chaincode support
|
||||
payloadBytes := marshalOrPanic(&pb.GetQueryResult{Collection: collection, Query: query, Metadata: metadata})
|
||||
msg := &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_GET_QUERY_RESULT, Payload: payloadBytes, Txid: txid, ChannelId: channelID}
|
||||
responseMsg, err := h.callPeerWithChaincodeMsg(msg, channelID, txid)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("[%s] error sending %s", shorttxid(msg.Txid), pb.ChaincodeMessage_GET_QUERY_RESULT)
|
||||
}
|
||||
|
||||
if responseMsg.Type == pb.ChaincodeMessage_RESPONSE {
|
||||
// Success response
|
||||
executeQueryResponse := &pb.QueryResponse{}
|
||||
if err = proto.Unmarshal(responseMsg.Payload, executeQueryResponse); err != nil {
|
||||
return nil, fmt.Errorf("[%s] unmarshal error", shorttxid(responseMsg.Txid))
|
||||
}
|
||||
|
||||
return executeQueryResponse, nil
|
||||
}
|
||||
if responseMsg.Type == pb.ChaincodeMessage_ERROR {
|
||||
// Error response
|
||||
return nil, fmt.Errorf("%s", responseMsg.Payload[:])
|
||||
}
|
||||
|
||||
// Incorrect chaincode message received
|
||||
return nil, fmt.Errorf("incorrect chaincode message %s received. Expecting %s or %s", responseMsg.Type, pb.ChaincodeMessage_RESPONSE, pb.ChaincodeMessage_ERROR)
|
||||
}
|
||||
|
||||
func (h *Handler) handleGetHistoryForKey(key string, channelID string, txid string) (*pb.QueryResponse, error) {
|
||||
// Create the channel on which to communicate the response from validating peer
|
||||
respChan, err := h.createResponseChannel(channelID, txid)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer h.deleteResponseChannel(channelID, txid)
|
||||
|
||||
// Send GET_HISTORY_FOR_KEY message to peer chaincode support
|
||||
payloadBytes := marshalOrPanic(&pb.GetHistoryForKey{Key: key})
|
||||
|
||||
msg := &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_GET_HISTORY_FOR_KEY, Payload: payloadBytes, Txid: txid, ChannelId: channelID}
|
||||
var responseMsg pb.ChaincodeMessage
|
||||
|
||||
if responseMsg, err = h.sendReceive(msg, respChan); err != nil {
|
||||
return nil, fmt.Errorf("[%s] error sending %s", shorttxid(msg.Txid), pb.ChaincodeMessage_GET_HISTORY_FOR_KEY)
|
||||
}
|
||||
|
||||
if responseMsg.Type == pb.ChaincodeMessage_RESPONSE {
|
||||
// Success response
|
||||
getHistoryForKeyResponse := &pb.QueryResponse{}
|
||||
if err = proto.Unmarshal(responseMsg.Payload, getHistoryForKeyResponse); err != nil {
|
||||
return nil, fmt.Errorf("[%s] unmarshal error", shorttxid(responseMsg.Txid))
|
||||
}
|
||||
|
||||
return getHistoryForKeyResponse, nil
|
||||
}
|
||||
if responseMsg.Type == pb.ChaincodeMessage_ERROR {
|
||||
// Error response
|
||||
return nil, fmt.Errorf("%s", responseMsg.Payload[:])
|
||||
}
|
||||
|
||||
// Incorrect chaincode message received
|
||||
return nil, fmt.Errorf("incorrect chaincode message %s received. Expecting %s or %s", responseMsg.Type, pb.ChaincodeMessage_RESPONSE, pb.ChaincodeMessage_ERROR)
|
||||
}
|
||||
|
||||
func (h *Handler) createResponse(status int32, payload []byte) pb.Response {
|
||||
return pb.Response{Status: status, Payload: payload}
|
||||
}
|
||||
|
||||
// handleInvokeChaincode communicates with the peer to invoke another chaincode.
|
||||
func (h *Handler) handleInvokeChaincode(chaincodeName string, args [][]byte, channelID string, txid string) pb.Response {
|
||||
payloadBytes := marshalOrPanic(&pb.ChaincodeSpec{ChaincodeId: &pb.ChaincodeID{Name: chaincodeName}, Input: &pb.ChaincodeInput{Args: args}})
|
||||
|
||||
// Create the channel on which to communicate the response from validating peer
|
||||
respChan, err := h.createResponseChannel(channelID, txid)
|
||||
if err != nil {
|
||||
return h.createResponse(ERROR, []byte(err.Error()))
|
||||
}
|
||||
defer h.deleteResponseChannel(channelID, txid)
|
||||
|
||||
// Send INVOKE_CHAINCODE message to peer chaincode support
|
||||
msg := &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_INVOKE_CHAINCODE, Payload: payloadBytes, Txid: txid, ChannelId: channelID}
|
||||
|
||||
var responseMsg pb.ChaincodeMessage
|
||||
|
||||
if responseMsg, err = h.sendReceive(msg, respChan); err != nil {
|
||||
errStr := fmt.Sprintf("[%s] error sending %s", shorttxid(msg.Txid), pb.ChaincodeMessage_INVOKE_CHAINCODE)
|
||||
return h.createResponse(ERROR, []byte(errStr))
|
||||
}
|
||||
|
||||
if responseMsg.Type == pb.ChaincodeMessage_RESPONSE {
|
||||
// Success response
|
||||
respMsg := &pb.ChaincodeMessage{}
|
||||
if err := proto.Unmarshal(responseMsg.Payload, respMsg); err != nil {
|
||||
return h.createResponse(ERROR, []byte(err.Error()))
|
||||
}
|
||||
if respMsg.Type == pb.ChaincodeMessage_COMPLETED {
|
||||
// Success response
|
||||
res := &pb.Response{}
|
||||
if err = proto.Unmarshal(respMsg.Payload, res); err != nil {
|
||||
return h.createResponse(ERROR, []byte(err.Error()))
|
||||
}
|
||||
return *res
|
||||
}
|
||||
return h.createResponse(ERROR, responseMsg.Payload)
|
||||
}
|
||||
if responseMsg.Type == pb.ChaincodeMessage_ERROR {
|
||||
// Error response
|
||||
return h.createResponse(ERROR, responseMsg.Payload)
|
||||
}
|
||||
|
||||
// Incorrect chaincode message received
|
||||
return h.createResponse(ERROR, []byte(fmt.Sprintf("[%s] Incorrect chaincode message %s received. Expecting %s or %s", shorttxid(responseMsg.Txid), responseMsg.Type, pb.ChaincodeMessage_RESPONSE, pb.ChaincodeMessage_ERROR)))
|
||||
}
|
||||
|
||||
// handleReady handles messages received from the peer when the handler is in the "ready" state.
|
||||
func (h *Handler) handleReady(msg *pb.ChaincodeMessage, errc chan error) error {
|
||||
switch msg.Type {
|
||||
case pb.ChaincodeMessage_RESPONSE, pb.ChaincodeMessage_ERROR:
|
||||
if err := h.handleResponse(msg); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
||||
case pb.ChaincodeMessage_INIT:
|
||||
go h.handleStubInteraction(h.handleInit, msg, errc)
|
||||
return nil
|
||||
|
||||
case pb.ChaincodeMessage_TRANSACTION:
|
||||
go h.handleStubInteraction(h.handleTransaction, msg, errc)
|
||||
return nil
|
||||
|
||||
default:
|
||||
return fmt.Errorf("[%s] Chaincode h cannot handle message (%s) while in state: %s", msg.Txid, msg.Type, h.state)
|
||||
}
|
||||
}
|
||||
|
||||
// handleEstablished handles messages received from the peer when the handler is in the "established" state.
|
||||
func (h *Handler) handleEstablished(msg *pb.ChaincodeMessage, errc chan error) error {
|
||||
if msg.Type != pb.ChaincodeMessage_READY {
|
||||
return fmt.Errorf("[%s] Chaincode h cannot handle message (%s) while in state: %s", msg.Txid, msg.Type, h.state)
|
||||
}
|
||||
|
||||
h.state = ready
|
||||
return nil
|
||||
}
|
||||
|
||||
// hanndleCreated handles messages received from the peer when the handler is in the "created" state.
|
||||
func (h *Handler) handleCreated(msg *pb.ChaincodeMessage, errc chan error) error {
|
||||
if msg.Type != pb.ChaincodeMessage_REGISTERED {
|
||||
return fmt.Errorf("[%s] Chaincode h cannot handle message (%s) while in state: %s", msg.Txid, msg.Type, h.state)
|
||||
}
|
||||
|
||||
h.state = established
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleMessage message handles loop for shim side of chaincode/peer stream.
|
||||
func (h *Handler) handleMessage(msg *pb.ChaincodeMessage, errc chan error) error {
|
||||
if msg.Type == pb.ChaincodeMessage_KEEPALIVE {
|
||||
h.serialSendAsync(msg, errc)
|
||||
return nil
|
||||
}
|
||||
var err error
|
||||
|
||||
switch h.state {
|
||||
case ready:
|
||||
err = h.handleReady(msg, errc)
|
||||
case established:
|
||||
err = h.handleEstablished(msg, errc)
|
||||
case created:
|
||||
err = h.handleCreated(msg, errc)
|
||||
default:
|
||||
panic(fmt.Sprintf("invalid handler state: %s", h.state))
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
payload := []byte(err.Error())
|
||||
errorMsg := &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: payload, Txid: msg.Txid}
|
||||
h.serialSend(errorMsg)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// marshalOrPanic attempts to marshal the provided protobbuf message but will panic
|
||||
// when marshaling fails instead of returning an error.
|
||||
func marshalOrPanic(msg proto.Message) []byte {
|
||||
bytes, err := proto.Marshal(msg)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to marshal message: %s", err))
|
||||
}
|
||||
return bytes
|
||||
}
|
392
chaincode/vendor/github.com/hyperledger/fabric-chaincode-go/shim/interfaces.go
generated
vendored
392
chaincode/vendor/github.com/hyperledger/fabric-chaincode-go/shim/interfaces.go
generated
vendored
@ -0,0 +1,392 @@
|
||||
// Copyright the Hyperledger Fabric contributors. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package shim
|
||||
|
||||
import (
|
||||
"github.com/golang/protobuf/ptypes/timestamp"
|
||||
"github.com/hyperledger/fabric-protos-go/ledger/queryresult"
|
||||
pb "github.com/hyperledger/fabric-protos-go/peer"
|
||||
)
|
||||
|
||||
// Chaincode interface must be implemented by all chaincodes. The fabric runs
|
||||
// the transactions by calling these functions as specified.
|
||||
type Chaincode interface {
|
||||
// Init is called during Instantiate transaction after the chaincode container
|
||||
// has been established for the first time, allowing the chaincode to
|
||||
// initialize its internal data
|
||||
Init(stub ChaincodeStubInterface) pb.Response
|
||||
|
||||
// Invoke is called to update or query the ledger in a proposal transaction.
|
||||
// Updated state variables are not committed to the ledger until the
|
||||
// transaction is committed.
|
||||
Invoke(stub ChaincodeStubInterface) pb.Response
|
||||
}
|
||||
|
||||
// ChaincodeStubInterface is used by deployable chaincode apps to access and
|
||||
// modify their ledgers
|
||||
type ChaincodeStubInterface interface {
|
||||
// GetArgs returns the arguments intended for the chaincode Init and Invoke
|
||||
// as an array of byte arrays.
|
||||
GetArgs() [][]byte
|
||||
|
||||
// GetStringArgs returns the arguments intended for the chaincode Init and
|
||||
// Invoke as a string array. Only use GetStringArgs if the client passes
|
||||
// arguments intended to be used as strings.
|
||||
GetStringArgs() []string
|
||||
|
||||
// GetFunctionAndParameters returns the first argument as the function
|
||||
// name and the rest of the arguments as parameters in a string array.
|
||||
// Only use GetFunctionAndParameters if the client passes arguments intended
|
||||
// to be used as strings.
|
||||
GetFunctionAndParameters() (string, []string)
|
||||
|
||||
// GetArgsSlice returns the arguments intended for the chaincode Init and
|
||||
// Invoke as a byte array
|
||||
GetArgsSlice() ([]byte, error)
|
||||
|
||||
// GetTxID returns the tx_id of the transaction proposal, which is unique per
|
||||
// transaction and per client. See
|
||||
// https://godoc.org/github.com/hyperledger/fabric-protos-go/common#ChannelHeader
|
||||
// for further details.
|
||||
GetTxID() string
|
||||
|
||||
// GetChannelID returns the channel the proposal is sent to for chaincode to process.
|
||||
// This would be the channel_id of the transaction proposal (see
|
||||
// https://godoc.org/github.com/hyperledger/fabric-protos-go/common#ChannelHeader )
|
||||
// except where the chaincode is calling another on a different channel.
|
||||
GetChannelID() string
|
||||
|
||||
// InvokeChaincode locally calls the specified chaincode `Invoke` using the
|
||||
// same transaction context; that is, chaincode calling chaincode doesn't
|
||||
// create a new transaction message.
|
||||
// If the called chaincode is on the same channel, it simply adds the called
|
||||
// chaincode read set and write set to the calling transaction.
|
||||
// If the called chaincode is on a different channel,
|
||||
// only the Response is returned to the calling chaincode; any PutState calls
|
||||
// from the called chaincode will not have any effect on the ledger; that is,
|
||||
// the called chaincode on a different channel will not have its read set
|
||||
// and write set applied to the transaction. Only the calling chaincode's
|
||||
// read set and write set will be applied to the transaction. Effectively
|
||||
// the called chaincode on a different channel is a `Query`, which does not
|
||||
// participate in state validation checks in subsequent commit phase.
|
||||
// If `channel` is empty, the caller's channel is assumed.
|
||||
InvokeChaincode(chaincodeName string, args [][]byte, channel string) pb.Response
|
||||
|
||||
// GetState returns the value of the specified `key` from the
|
||||
// ledger. Note that GetState doesn't read data from the writeset, which
|
||||
// has not been committed to the ledger. In other words, GetState doesn't
|
||||
// consider data modified by PutState that has not been committed.
|
||||
// If the key does not exist in the state database, (nil, nil) is returned.
|
||||
GetState(key string) ([]byte, error)
|
||||
|
||||
// PutState puts the specified `key` and `value` into the transaction's
|
||||
// writeset as a data-write proposal. PutState doesn't effect the ledger
|
||||
// until the transaction is validated and successfully committed.
|
||||
// Simple keys must not be an empty string and must not start with a
|
||||
// null character (0x00) in order to avoid range query collisions with
|
||||
// composite keys, which internally get prefixed with 0x00 as composite
|
||||
// key namespace. In addition, if using CouchDB, keys can only contain
|
||||
// valid UTF-8 strings and cannot begin with an underscore ("_").
|
||||
PutState(key string, value []byte) error
|
||||
|
||||
// DelState records the specified `key` to be deleted in the writeset of
|
||||
// the transaction proposal. The `key` and its value will be deleted from
|
||||
// the ledger when the transaction is validated and successfully committed.
|
||||
DelState(key string) error
|
||||
|
||||
// SetStateValidationParameter sets the key-level endorsement policy for `key`.
|
||||
SetStateValidationParameter(key string, ep []byte) error
|
||||
|
||||
// GetStateValidationParameter retrieves the key-level endorsement policy
|
||||
// for `key`. Note that this will introduce a read dependency on `key` in
|
||||
// the transaction's readset.
|
||||
GetStateValidationParameter(key string) ([]byte, error)
|
||||
|
||||
// GetStateByRange returns a range iterator over a set of keys in the
|
||||
// ledger. The iterator can be used to iterate over all keys
|
||||
// between the startKey (inclusive) and endKey (exclusive).
|
||||
// However, if the number of keys between startKey and endKey is greater than the
|
||||
// totalQueryLimit (defined in core.yaml), this iterator cannot be used
|
||||
// to fetch all keys (results will be capped by the totalQueryLimit).
|
||||
// The keys are returned by the iterator in lexical order. Note
|
||||
// that startKey and endKey can be empty string, which implies unbounded range
|
||||
// query on start or end.
|
||||
// Call Close() on the returned StateQueryIteratorInterface object when done.
|
||||
// The query is re-executed during validation phase to ensure result set
|
||||
// has not changed since transaction endorsement (phantom reads detected).
|
||||
GetStateByRange(startKey, endKey string) (StateQueryIteratorInterface, error)
|
||||
|
||||
// GetStateByRangeWithPagination returns a range iterator over a set of keys in the
|
||||
// ledger. The iterator can be used to fetch keys between the startKey (inclusive)
|
||||
// and endKey (exclusive).
|
||||
// When an empty string is passed as a value to the bookmark argument, the returned
|
||||
// iterator can be used to fetch the first `pageSize` keys between the startKey
|
||||
// (inclusive) and endKey (exclusive).
|
||||
// When the bookmark is a non-emptry string, the iterator can be used to fetch
|
||||
// the first `pageSize` keys between the bookmark (inclusive) and endKey (exclusive).
|
||||
// Note that only the bookmark present in a prior page of query results (ResponseMetadata)
|
||||
// can be used as a value to the bookmark argument. Otherwise, an empty string must
|
||||
// be passed as bookmark.
|
||||
// The keys are returned by the iterator in lexical order. Note
|
||||
// that startKey and endKey can be empty string, which implies unbounded range
|
||||
// query on start or end.
|
||||
// Call Close() on the returned StateQueryIteratorInterface object when done.
|
||||
// This call is only supported in a read only transaction.
|
||||
GetStateByRangeWithPagination(startKey, endKey string, pageSize int32,
|
||||
bookmark string) (StateQueryIteratorInterface, *pb.QueryResponseMetadata, error)
|
||||
|
||||
// GetStateByPartialCompositeKey queries the state in the ledger based on
|
||||
// a given partial composite key. This function returns an iterator
|
||||
// which can be used to iterate over all composite keys whose prefix matches
|
||||
// the given partial composite key. However, if the number of matching composite
|
||||
// keys is greater than the totalQueryLimit (defined in core.yaml), this iterator
|
||||
// cannot be used to fetch all matching keys (results will be limited by the totalQueryLimit).
|
||||
// The `objectType` and attributes are expected to have only valid utf8 strings and
|
||||
// should not contain U+0000 (nil byte) and U+10FFFF (biggest and unallocated code point).
|
||||
// See related functions SplitCompositeKey and CreateCompositeKey.
|
||||
// Call Close() on the returned StateQueryIteratorInterface object when done.
|
||||
// The query is re-executed during validation phase to ensure result set
|
||||
// has not changed since transaction endorsement (phantom reads detected).
|
||||
GetStateByPartialCompositeKey(objectType string, keys []string) (StateQueryIteratorInterface, error)
|
||||
|
||||
// GetStateByPartialCompositeKeyWithPagination queries the state in the ledger based on
|
||||
// a given partial composite key. This function returns an iterator
|
||||
// which can be used to iterate over the composite keys whose
|
||||
// prefix matches the given partial composite key.
|
||||
// When an empty string is passed as a value to the bookmark argument, the returned
|
||||
// iterator can be used to fetch the first `pageSize` composite keys whose prefix
|
||||
// matches the given partial composite key.
|
||||
// When the bookmark is a non-emptry string, the iterator can be used to fetch
|
||||
// the first `pageSize` keys between the bookmark (inclusive) and the last matching
|
||||
// composite key.
|
||||
// Note that only the bookmark present in a prior page of query result (ResponseMetadata)
|
||||
// can be used as a value to the bookmark argument. Otherwise, an empty string must
|
||||
// be passed as bookmark.
|
||||
// The `objectType` and attributes are expected to have only valid utf8 strings
|
||||
// and should not contain U+0000 (nil byte) and U+10FFFF (biggest and unallocated
|
||||
// code point). See related functions SplitCompositeKey and CreateCompositeKey.
|
||||
// Call Close() on the returned StateQueryIteratorInterface object when done.
|
||||
// This call is only supported in a read only transaction.
|
||||
GetStateByPartialCompositeKeyWithPagination(objectType string, keys []string,
|
||||
pageSize int32, bookmark string) (StateQueryIteratorInterface, *pb.QueryResponseMetadata, error)
|
||||
|
||||
// CreateCompositeKey combines the given `attributes` to form a composite
|
||||
// key. The objectType and attributes are expected to have only valid utf8
|
||||
// strings and should not contain U+0000 (nil byte) and U+10FFFF
|
||||
// (biggest and unallocated code point).
|
||||
// The resulting composite key can be used as the key in PutState().
|
||||
CreateCompositeKey(objectType string, attributes []string) (string, error)
|
||||
|
||||
// SplitCompositeKey splits the specified key into attributes on which the
|
||||
// composite key was formed. Composite keys found during range queries
|
||||
// or partial composite key queries can therefore be split into their
|
||||
// composite parts.
|
||||
SplitCompositeKey(compositeKey string) (string, []string, error)
|
||||
|
||||
// GetQueryResult performs a "rich" query against a state database. It is
|
||||
// only supported for state databases that support rich query,
|
||||
// e.g.CouchDB. The query string is in the native syntax
|
||||
// of the underlying state database. An iterator is returned
|
||||
// which can be used to iterate over all keys in the query result set.
|
||||
// However, if the number of keys in the query result set is greater than the
|
||||
// totalQueryLimit (defined in core.yaml), this iterator cannot be used
|
||||
// to fetch all keys in the query result set (results will be limited by
|
||||
// the totalQueryLimit).
|
||||
// The query is NOT re-executed during validation phase, phantom reads are
|
||||
// not detected. That is, other committed transactions may have added,
|
||||
// updated, or removed keys that impact the result set, and this would not
|
||||
// be detected at validation/commit time. Applications susceptible to this
|
||||
// should therefore not use GetQueryResult as part of transactions that update
|
||||
// ledger, and should limit use to read-only chaincode operations.
|
||||
GetQueryResult(query string) (StateQueryIteratorInterface, error)
|
||||
|
||||
// GetQueryResultWithPagination performs a "rich" query against a state database.
|
||||
// It is only supported for state databases that support rich query,
|
||||
// e.g., CouchDB. The query string is in the native syntax
|
||||
// of the underlying state database. An iterator is returned
|
||||
// which can be used to iterate over keys in the query result set.
|
||||
// When an empty string is passed as a value to the bookmark argument, the returned
|
||||
// iterator can be used to fetch the first `pageSize` of query results.
|
||||
// When the bookmark is a non-emptry string, the iterator can be used to fetch
|
||||
// the first `pageSize` keys between the bookmark and the last key in the query result.
|
||||
// Note that only the bookmark present in a prior page of query results (ResponseMetadata)
|
||||
// can be used as a value to the bookmark argument. Otherwise, an empty string
|
||||
// must be passed as bookmark.
|
||||
// This call is only supported in a read only transaction.
|
||||
GetQueryResultWithPagination(query string, pageSize int32,
|
||||
bookmark string) (StateQueryIteratorInterface, *pb.QueryResponseMetadata, error)
|
||||
|
||||
// GetHistoryForKey returns a history of key values across time.
|
||||
// For each historic key update, the historic value and associated
|
||||
// transaction id and timestamp are returned. The timestamp is the
|
||||
// timestamp provided by the client in the proposal header.
|
||||
// GetHistoryForKey requires peer configuration
|
||||
// core.ledger.history.enableHistoryDatabase to be true.
|
||||
// The query is NOT re-executed during validation phase, phantom reads are
|
||||
// not detected. That is, other committed transactions may have updated
|
||||
// the key concurrently, impacting the result set, and this would not be
|
||||
// detected at validation/commit time. Applications susceptible to this
|
||||
// should therefore not use GetHistoryForKey as part of transactions that
|
||||
// update ledger, and should limit use to read-only chaincode operations.
|
||||
GetHistoryForKey(key string) (HistoryQueryIteratorInterface, error)
|
||||
|
||||
// GetPrivateData returns the value of the specified `key` from the specified
|
||||
// `collection`. Note that GetPrivateData doesn't read data from the
|
||||
// private writeset, which has not been committed to the `collection`. In
|
||||
// other words, GetPrivateData doesn't consider data modified by PutPrivateData
|
||||
// that has not been committed.
|
||||
GetPrivateData(collection, key string) ([]byte, error)
|
||||
|
||||
// GetPrivateDataHash returns the hash of the value of the specified `key` from the specified
|
||||
// `collection`
|
||||
GetPrivateDataHash(collection, key string) ([]byte, error)
|
||||
|
||||
// PutPrivateData puts the specified `key` and `value` into the transaction's
|
||||
// private writeset. Note that only hash of the private writeset goes into the
|
||||
// transaction proposal response (which is sent to the client who issued the
|
||||
// transaction) and the actual private writeset gets temporarily stored in a
|
||||
// transient store. PutPrivateData doesn't effect the `collection` until the
|
||||
// transaction is validated and successfully committed. Simple keys must not
|
||||
// be an empty string and must not start with a null character (0x00) in order
|
||||
// to avoid range query collisions with composite keys, which internally get
|
||||
// prefixed with 0x00 as composite key namespace. In addition, if using
|
||||
// CouchDB, keys can only contain valid UTF-8 strings and cannot begin with an
|
||||
// an underscore ("_").
|
||||
PutPrivateData(collection string, key string, value []byte) error
|
||||
|
||||
// DelPrivateData records the specified `key` to be deleted in the private writeset
|
||||
// of the transaction. Note that only hash of the private writeset goes into the
|
||||
// transaction proposal response (which is sent to the client who issued the
|
||||
// transaction) and the actual private writeset gets temporarily stored in a
|
||||
// transient store. The `key` and its value will be deleted from the collection
|
||||
// when the transaction is validated and successfully committed.
|
||||
DelPrivateData(collection, key string) error
|
||||
|
||||
// SetPrivateDataValidationParameter sets the key-level endorsement policy
|
||||
// for the private data specified by `key`.
|
||||
SetPrivateDataValidationParameter(collection, key string, ep []byte) error
|
||||
|
||||
// GetPrivateDataValidationParameter retrieves the key-level endorsement
|
||||
// policy for the private data specified by `key`. Note that this introduces
|
||||
// a read dependency on `key` in the transaction's readset.
|
||||
GetPrivateDataValidationParameter(collection, key string) ([]byte, error)
|
||||
|
||||
// GetPrivateDataByRange returns a range iterator over a set of keys in a
|
||||
// given private collection. The iterator can be used to iterate over all keys
|
||||
// between the startKey (inclusive) and endKey (exclusive).
|
||||
// The keys are returned by the iterator in lexical order. Note
|
||||
// that startKey and endKey can be empty string, which implies unbounded range
|
||||
// query on start or end.
|
||||
// Call Close() on the returned StateQueryIteratorInterface object when done.
|
||||
// The query is re-executed during validation phase to ensure result set
|
||||
// has not changed since transaction endorsement (phantom reads detected).
|
||||
GetPrivateDataByRange(collection, startKey, endKey string) (StateQueryIteratorInterface, error)
|
||||
|
||||
// GetPrivateDataByPartialCompositeKey queries the state in a given private
|
||||
// collection based on a given partial composite key. This function returns
|
||||
// an iterator which can be used to iterate over all composite keys whose prefix
|
||||
// matches the given partial composite key. The `objectType` and attributes are
|
||||
// expected to have only valid utf8 strings and should not contain
|
||||
// U+0000 (nil byte) and U+10FFFF (biggest and unallocated code point).
|
||||
// See related functions SplitCompositeKey and CreateCompositeKey.
|
||||
// Call Close() on the returned StateQueryIteratorInterface object when done.
|
||||
// The query is re-executed during validation phase to ensure result set
|
||||
// has not changed since transaction endorsement (phantom reads detected).
|
||||
GetPrivateDataByPartialCompositeKey(collection, objectType string, keys []string) (StateQueryIteratorInterface, error)
|
||||
|
||||
// GetPrivateDataQueryResult performs a "rich" query against a given private
|
||||
// collection. It is only supported for state databases that support rich query,
|
||||
// e.g.CouchDB. The query string is in the native syntax
|
||||
// of the underlying state database. An iterator is returned
|
||||
// which can be used to iterate (next) over the query result set.
|
||||
// The query is NOT re-executed during validation phase, phantom reads are
|
||||
// not detected. That is, other committed transactions may have added,
|
||||
// updated, or removed keys that impact the result set, and this would not
|
||||
// be detected at validation/commit time. Applications susceptible to this
|
||||
// should therefore not use GetPrivateDataQueryResult as part of transactions that update
|
||||
// ledger, and should limit use to read-only chaincode operations.
|
||||
GetPrivateDataQueryResult(collection, query string) (StateQueryIteratorInterface, error)
|
||||
|
||||
// GetCreator returns `SignatureHeader.Creator` (e.g. an identity)
|
||||
// of the `SignedProposal`. This is the identity of the agent (or user)
|
||||
// submitting the transaction.
|
||||
GetCreator() ([]byte, error)
|
||||
|
||||
// GetTransient returns the `ChaincodeProposalPayload.Transient` field.
|
||||
// It is a map that contains data (e.g. cryptographic material)
|
||||
// that might be used to implement some form of application-level
|
||||
// confidentiality. The contents of this field, as prescribed by
|
||||
// `ChaincodeProposalPayload`, are supposed to always
|
||||
// be omitted from the transaction and excluded from the ledger.
|
||||
GetTransient() (map[string][]byte, error)
|
||||
|
||||
// GetBinding returns the transaction binding, which is used to enforce a
|
||||
// link between application data (like those stored in the transient field
|
||||
// above) to the proposal itself. This is useful to avoid possible replay
|
||||
// attacks.
|
||||
GetBinding() ([]byte, error)
|
||||
|
||||
// GetDecorations returns additional data (if applicable) about the proposal
|
||||
// that originated from the peer. This data is set by the decorators of the
|
||||
// peer, which append or mutate the chaincode input passed to the chaincode.
|
||||
GetDecorations() map[string][]byte
|
||||
|
||||
// GetSignedProposal returns the SignedProposal object, which contains all
|
||||
// data elements part of a transaction proposal.
|
||||
GetSignedProposal() (*pb.SignedProposal, error)
|
||||
|
||||
// GetTxTimestamp returns the timestamp when the transaction was created. This
|
||||
// is taken from the transaction ChannelHeader, therefore it will indicate the
|
||||
// client's timestamp and will have the same value across all endorsers.
|
||||
GetTxTimestamp() (*timestamp.Timestamp, error)
|
||||
|
||||
// SetEvent allows the chaincode to set an event on the response to the
|
||||
// proposal to be included as part of a transaction. The event will be
|
||||
// available within the transaction in the committed block regardless of the
|
||||
// validity of the transaction.
|
||||
// Only a single event can be included in a transaction, and must originate
|
||||
// from the outer-most invoked chaincode in chaincode-to-chaincode scenarios.
|
||||
// The marshaled ChaincodeEvent will be available in the transaction's ChaincodeAction.events field.
|
||||
SetEvent(name string, payload []byte) error
|
||||
}
|
||||
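// Illustrative sketch (not part of the vendored shim): how a chaincode might
// combine the private-data rich query and SetEvent methods documented above.
// The collection name "assetCollection", the CouchDB selector, and the event
// name "assetsQueried" are assumptions chosen only for this example.
//
//	func queryPrivateAssets(stub shim.ChaincodeStubInterface) pb.Response {
//		// Rich query against a private collection; only valid when the state
//		// database supports rich queries (e.g. CouchDB).
//		iter, err := stub.GetPrivateDataQueryResult("assetCollection", `{"selector":{"docType":"asset"}}`)
//		if err != nil {
//			return shim.Error(err.Error())
//		}
//		defer iter.Close() // always release the iterator when done
//
//		var keys []byte
//		for iter.HasNext() {
//			kv, err := iter.Next()
//			if err != nil {
//				return shim.Error(err.Error())
//			}
//			keys = append(keys, []byte(kv.Key+"\n")...)
//		}
//
//		// Attach a single event to the transaction (see SetEvent above).
//		if err := stub.SetEvent("assetsQueried", keys); err != nil {
//			return shim.Error(err.Error())
//		}
//		return shim.Success(keys)
//	}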
|
||||
// CommonIteratorInterface allows a chaincode to check whether any more results
|
||||
// remain to be fetched from an iterator and to close it when done.
|
||||
type CommonIteratorInterface interface {
|
||||
// HasNext returns true if the range query iterator contains additional keys
|
||||
// and values.
|
||||
HasNext() bool
|
||||
|
||||
// Close closes the iterator. This should be called when done
|
||||
// reading from the iterator to free up resources.
|
||||
Close() error
|
||||
}
|
||||
|
||||
// StateQueryIteratorInterface allows a chaincode to iterate over a set of
|
||||
// key/value pairs returned by a range or execute (rich) query.
|
||||
type StateQueryIteratorInterface interface {
|
||||
// Inherit HasNext() and Close()
|
||||
CommonIteratorInterface
|
||||
|
||||
// Next returns the next key and value from the range or execute (rich) query iterator.
|
||||
Next() (*queryresult.KV, error)
|
||||
}
|
||||
|
||||
// HistoryQueryIteratorInterface allows a chaincode to iterate over a set of
|
||||
// key/value pairs returned by a history query.
|
||||
type HistoryQueryIteratorInterface interface {
|
||||
// Inherit HasNext() and Close()
|
||||
CommonIteratorInterface
|
||||
|
||||
// Next returns the next key and value in the history query iterator.
|
||||
Next() (*queryresult.KeyModification, error)
|
||||
}
|
||||
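// Illustrative sketch (not part of the vendored shim): walking a history
// iterator such as the one returned by GetHistoryForKey. The key name is an
// assumption for the example only.
//
//	histIter, err := stub.GetHistoryForKey("asset1")
//	if err != nil {
//		return shim.Error(err.Error())
//	}
//	defer histIter.Close()
//	for histIter.HasNext() {
//		mod, err := histIter.Next() // *queryresult.KeyModification
//		if err != nil {
//			return shim.Error(err.Error())
//		}
//		// mod.TxId, mod.Timestamp, mod.IsDelete and mod.Value describe one
//		// committed modification of the key.
//		_ = mod
//	}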
|
||||
// MockQueryIteratorInterface allows a chaincode to iterate over a set of
|
||||
// key/value pairs returned by range query.
|
||||
// TODO: Once the execute query and history query are implemented in MockStub,
|
||||
// we need to update this interface
|
||||
type MockQueryIteratorInterface interface {
|
||||
StateQueryIteratorInterface
|
||||
}
|
55   chaincode/vendor/github.com/hyperledger/fabric-chaincode-go/shim/internal/client.go (generated, vendored)
@ -0,0 +1,55 @@
|
||||
// Copyright the Hyperledger Fabric contributors. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"time"
|
||||
|
||||
peerpb "github.com/hyperledger/fabric-protos-go/peer"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
)
|
||||
|
||||
const (
|
||||
dialTimeout = 10 * time.Second
|
||||
maxRecvMessageSize = 100 * 1024 * 1024 // 100 MiB
|
||||
maxSendMessageSize = 100 * 1024 * 1024 // 100 MiB
|
||||
)
|
||||
|
||||
// NewClientConn ...
|
||||
func NewClientConn(
|
||||
address string,
|
||||
tlsConf *tls.Config,
|
||||
kaOpts keepalive.ClientParameters,
|
||||
) (*grpc.ClientConn, error) {
|
||||
|
||||
dialOpts := []grpc.DialOption{
|
||||
grpc.WithKeepaliveParams(kaOpts),
|
||||
grpc.WithBlock(),
|
||||
grpc.FailOnNonTempDialError(true),
|
||||
grpc.WithDefaultCallOptions(
|
||||
grpc.MaxCallRecvMsgSize(maxRecvMessageSize),
|
||||
grpc.MaxCallSendMsgSize(maxSendMessageSize),
|
||||
),
|
||||
}
|
||||
|
||||
if tlsConf != nil {
|
||||
creds := credentials.NewTLS(tlsConf)
|
||||
dialOpts = append(dialOpts, grpc.WithTransportCredentials(creds))
|
||||
} else {
|
||||
dialOpts = append(dialOpts, grpc.WithInsecure())
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), dialTimeout)
|
||||
defer cancel()
|
||||
return grpc.DialContext(ctx, address, dialOpts...)
|
||||
}
|
||||
|
||||
// NewRegisterClient ...
|
||||
func NewRegisterClient(conn *grpc.ClientConn) (peerpb.ChaincodeSupport_RegisterClient, error) {
|
||||
return peerpb.NewChaincodeSupportClient(conn).Register(context.Background())
|
||||
}
|
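// Illustrative sketch (not part of the vendored file): how the shim itself is
// expected to use these helpers when dialing the peer. The peer address is an
// assumption, and this internal package cannot be imported from user chaincode;
// the fragment is shown as if it lived inside the shim module.
//
//	conn, err := NewClientConn("peer0.example.com:7052", tlsConf, keepalive.ClientParameters{
//		Time:                time.Minute,
//		Timeout:             20 * time.Second,
//		PermitWithoutStream: true,
//	})
//	if err != nil {
//		return err
//	}
//	stream, err := NewRegisterClient(conn) // bidirectional Register stream
//	if err != nil {
//		return err
//	}
//	_ = stream // the shim then drives the register/chat protocol over this stream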
151  chaincode/vendor/github.com/hyperledger/fabric-chaincode-go/shim/internal/config.go (generated, vendored)
@ -0,0 +1,151 @@
|
||||
// Copyright the Hyperledger Fabric contributors. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/keepalive"
|
||||
)
|
||||
|
||||
// Config contains chaincode's configuration
|
||||
type Config struct {
|
||||
ChaincodeName string
|
||||
TLS *tls.Config
|
||||
KaOpts keepalive.ClientParameters
|
||||
}
|
||||
|
||||
// LoadConfig loads the chaincode configuration
|
||||
func LoadConfig() (Config, error) {
|
||||
var err error
|
||||
tlsEnabled, err := strconv.ParseBool(os.Getenv("CORE_PEER_TLS_ENABLED"))
|
||||
if err != nil {
|
||||
return Config{}, errors.New("'CORE_PEER_TLS_ENABLED' must be set to 'true' or 'false'")
|
||||
}
|
||||
|
||||
conf := Config{
|
||||
ChaincodeName: os.Getenv("CORE_CHAINCODE_ID_NAME"),
|
||||
// hardcode to match chaincode server
|
||||
KaOpts: keepalive.ClientParameters{
|
||||
Time: 1 * time.Minute,
|
||||
Timeout: 20 * time.Second,
|
||||
PermitWithoutStream: true,
|
||||
},
|
||||
}
|
||||
|
||||
if !tlsEnabled {
|
||||
return conf, nil
|
||||
}
|
||||
|
||||
var key []byte
|
||||
path, set := os.LookupEnv("CORE_TLS_CLIENT_KEY_FILE")
|
||||
if set {
|
||||
key, err = ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return Config{}, fmt.Errorf("failed to read private key file: %s", err)
|
||||
}
|
||||
} else {
|
||||
data, err := ioutil.ReadFile(os.Getenv("CORE_TLS_CLIENT_KEY_PATH"))
|
||||
if err != nil {
|
||||
return Config{}, fmt.Errorf("failed to read private key file: %s", err)
|
||||
}
|
||||
key, err = base64.StdEncoding.DecodeString(string(data))
|
||||
if err != nil {
|
||||
return Config{}, fmt.Errorf("failed to decode private key file: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
var cert []byte
|
||||
path, set = os.LookupEnv("CORE_TLS_CLIENT_CERT_FILE")
|
||||
if set {
|
||||
cert, err = ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return Config{}, fmt.Errorf("failed to read public key file: %s", err)
|
||||
}
|
||||
} else {
|
||||
data, err := ioutil.ReadFile(os.Getenv("CORE_TLS_CLIENT_CERT_PATH"))
|
||||
if err != nil {
|
||||
return Config{}, fmt.Errorf("failed to read public key file: %s", err)
|
||||
}
|
||||
cert, err = base64.StdEncoding.DecodeString(string(data))
|
||||
if err != nil {
|
||||
return Config{}, fmt.Errorf("failed to decode public key file: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
root, err := ioutil.ReadFile(os.Getenv("CORE_PEER_TLS_ROOTCERT_FILE"))
|
||||
if err != nil {
|
||||
return Config{}, fmt.Errorf("failed to read root cert file: %s", err)
|
||||
}
|
||||
|
||||
tlscfg, err := LoadTLSConfig(false, key, cert, root)
|
||||
if err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
|
||||
conf.TLS = tlscfg
|
||||
|
||||
return conf, nil
|
||||
}
|
||||
|
||||
// LoadTLSConfig loads the TLS configuration for the chaincode
|
||||
func LoadTLSConfig(isserver bool, key, cert, root []byte) (*tls.Config, error) {
|
||||
if key == nil {
|
||||
return nil, fmt.Errorf("key not provided")
|
||||
}
|
||||
|
||||
if cert == nil {
|
||||
return nil, fmt.Errorf("cert not provided")
|
||||
}
|
||||
|
||||
if !isserver && root == nil {
|
||||
return nil, fmt.Errorf("root cert not provided")
|
||||
}
|
||||
|
||||
cccert, err := tls.X509KeyPair(cert, key)
|
||||
if err != nil {
|
||||
return nil, errors.New("failed to parse client key pair")
|
||||
}
|
||||
|
||||
var rootCertPool *x509.CertPool
|
||||
if root != nil {
|
||||
rootCertPool = x509.NewCertPool()
|
||||
if ok := rootCertPool.AppendCertsFromPEM(root); !ok {
|
||||
return nil, errors.New("failed to load root cert file")
|
||||
}
|
||||
}
|
||||
|
||||
tlscfg := &tls.Config{
|
||||
MinVersion: tls.VersionTLS12,
|
||||
Certificates: []tls.Certificate{cccert},
|
||||
}
|
||||
|
||||
//follow Peer's server default config properties
|
||||
if isserver {
|
||||
tlscfg.ClientCAs = rootCertPool
|
||||
tlscfg.SessionTicketsDisabled = true
|
||||
tlscfg.CipherSuites = []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
|
||||
tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
|
||||
}
|
||||
if rootCertPool != nil {
|
||||
tlscfg.ClientAuth = tls.RequireAndVerifyClientCert
|
||||
}
|
||||
} else {
|
||||
tlscfg.RootCAs = rootCertPool
|
||||
}
|
||||
|
||||
return tlscfg, nil
|
||||
}
|
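// Illustrative sketch (not part of the vendored file): the environment
// variables LoadConfig above consumes. The values are assumptions for the
// example; the *_FILE variables point at PEM files on disk, while the *_PATH
// variables point at base64-encoded material.
//
//	os.Setenv("CORE_PEER_TLS_ENABLED", "true")
//	os.Setenv("CORE_CHAINCODE_ID_NAME", "mycc:1.0")
//	os.Setenv("CORE_TLS_CLIENT_KEY_FILE", "/certs/client.key")
//	os.Setenv("CORE_TLS_CLIENT_CERT_FILE", "/certs/client.crt")
//	os.Setenv("CORE_PEER_TLS_ROOTCERT_FILE", "/certs/ca.crt")
//
//	conf, err := LoadConfig()
//	if err != nil {
//		// CORE_PEER_TLS_ENABLED missing/invalid, or cert material unreadable
//		return err
//	}
//	_ = conf.TLS // *tls.Config ready to hand to NewClientConn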
106  chaincode/vendor/github.com/hyperledger/fabric-chaincode-go/shim/internal/server.go (generated, vendored)
@ -0,0 +1,106 @@
|
||||
// Copyright the Hyperledger Fabric contributors. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
)
|
||||
|
||||
const (
|
||||
serverInterval = time.Duration(2) * time.Hour // 2 hours - gRPC default
|
||||
serverTimeout = time.Duration(20) * time.Second // 20 sec - gRPC default
|
||||
serverMinInterval = time.Duration(1) * time.Minute
|
||||
connectionTimeout = 5 * time.Second
|
||||
)
|
||||
|
||||
// Server abstracts grpc service properties
|
||||
type Server struct {
|
||||
Listener net.Listener
|
||||
Server *grpc.Server
|
||||
}
|
||||
|
||||
// Start the server
|
||||
func (s *Server) Start() error {
|
||||
if s.Listener == nil {
|
||||
return errors.New("nil listener")
|
||||
}
|
||||
|
||||
if s.Server == nil {
|
||||
return errors.New("nil server")
|
||||
}
|
||||
|
||||
return s.Server.Serve(s.Listener)
|
||||
}
|
||||
|
||||
// Stop the server
|
||||
func (s *Server) Stop() {
|
||||
if s.Server != nil {
|
||||
s.Server.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
// NewServer creates a new implementation of a gRPC server given a
|
||||
// listen address
|
||||
func NewServer(
|
||||
address string,
|
||||
tlsConf *tls.Config,
|
||||
srvKaOpts *keepalive.ServerParameters,
|
||||
) (*Server, error) {
|
||||
if address == "" {
|
||||
return nil, errors.New("server listen address not provided")
|
||||
}
|
||||
|
||||
//create our listener
|
||||
listener, err := net.Listen("tcp", address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
//set up server options for keepalive and TLS
|
||||
var serverOpts []grpc.ServerOption
|
||||
|
||||
if srvKaOpts != nil {
|
||||
serverOpts = append(serverOpts, grpc.KeepaliveParams(*srvKaOpts))
|
||||
} else {
|
||||
serverKeepAliveParameters := keepalive.ServerParameters{
|
||||
Time: 1 * time.Minute,
|
||||
Timeout: 20 * time.Second,
|
||||
}
|
||||
serverOpts = append(serverOpts, grpc.KeepaliveParams(serverKeepAliveParameters))
|
||||
}
|
||||
|
||||
if tlsConf != nil {
|
||||
serverOpts = append(serverOpts, grpc.Creds(credentials.NewTLS(tlsConf)))
|
||||
}
|
||||
|
||||
// Default properties follow - let's start simple and stick with defaults for now.
|
||||
// These match Fabric peer side properties. We can expose these as user properties
|
||||
// if needed
|
||||
|
||||
// set max send and recv msg sizes
|
||||
serverOpts = append(serverOpts, grpc.MaxSendMsgSize(maxSendMessageSize))
|
||||
serverOpts = append(serverOpts, grpc.MaxRecvMsgSize(maxRecvMessageSize))
|
||||
|
||||
//set enforcement policy
|
||||
kep := keepalive.EnforcementPolicy{
|
||||
MinTime: serverMinInterval,
|
||||
// allow keepalive w/o rpc
|
||||
PermitWithoutStream: true,
|
||||
}
|
||||
serverOpts = append(serverOpts, grpc.KeepaliveEnforcementPolicy(kep))
|
||||
|
||||
//set default connection timeout
|
||||
serverOpts = append(serverOpts, grpc.ConnectionTimeout(connectionTimeout))
|
||||
|
||||
server := grpc.NewServer(serverOpts...)
|
||||
|
||||
return &Server{Listener: listener, Server: server}, nil
|
||||
}
|
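// Illustrative sketch (not part of the vendored file): starting a chaincode
// gRPC server with the helpers above, as used in the chaincode-as-server
// model. The listen address is an assumption.
//
//	srv, err := NewServer("0.0.0.0:9999", tlsConf, nil) // nil keepalive => library defaults
//	if err != nil {
//		return err
//	}
//	// Register the desired gRPC service implementation on srv.Server here
//	// (e.g. the generated Chaincode service from fabric-protos-go) before serving.
//	defer srv.Stop()
//	return srv.Start() // Serve() blocks until the listener is closed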
@ -0,0 +1,36 @@
|
||||
// Copyright the Hyperledger Fabric contributors. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package shim
|
||||
|
||||
import (
|
||||
pb "github.com/hyperledger/fabric-protos-go/peer"
|
||||
)
|
||||
|
||||
const (
|
||||
// OK constant - status code less than 400, endorser will endorse it.
|
||||
// OK means the init or invoke completed successfully.
|
||||
OK = 200
|
||||
|
||||
// ERRORTHRESHOLD constant - status code greater than or equal to 400 will be considered an error and rejected by endorser.
|
||||
ERRORTHRESHOLD = 400
|
||||
|
||||
// ERROR constant - default error value
|
||||
ERROR = 500
|
||||
)
|
||||
|
||||
// Success ...
|
||||
func Success(payload []byte) pb.Response {
|
||||
return pb.Response{
|
||||
Status: OK,
|
||||
Payload: payload,
|
||||
}
|
||||
}
|
||||
|
||||
// Error ...
|
||||
func Error(msg string) pb.Response {
|
||||
return pb.Response{
|
||||
Status: ERROR,
|
||||
Message: msg,
|
||||
}
|
||||
}
|
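// Illustrative sketch (not part of the vendored file): the Success and Error
// helpers above are the usual way to build the pb.Response returned from a
// chaincode's Invoke. The receiver type and key name are assumptions.
//
//	func (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response {
//		value, err := stub.GetState("asset1")
//		if err != nil {
//			return shim.Error("failed to read asset1: " + err.Error()) // Status 500
//		}
//		return shim.Success(value) // Status 200 with the value as payload
//	}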
@ -0,0 +1,153 @@
|
||||
// Copyright the Hyperledger Fabric contributors. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Package shim provides APIs for the chaincode to access its state
|
||||
// variables, transaction context and call other chaincodes.
|
||||
package shim
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/hyperledger/fabric-chaincode-go/shim/internal"
|
||||
peerpb "github.com/hyperledger/fabric-protos-go/peer"
|
||||
)
|
||||
|
||||
const (
|
||||
minUnicodeRuneValue = 0 //U+0000
|
||||
maxUnicodeRuneValue = utf8.MaxRune //U+10FFFF - maximum (and unallocated) code point
|
||||
compositeKeyNamespace = "\x00"
|
||||
emptyKeySubstitute = "\x01"
|
||||
)
|
||||
|
||||
// peer as server
|
||||
var peerAddress = flag.String("peer.address", "", "peer address")
|
||||
|
||||
//this separates the chaincode stream interface establishment
|
||||
//so we can replace it with a mock peer stream
|
||||
type peerStreamGetter func(name string) (ClientStream, error)
|
||||
|
||||
//used by unit tests to set up a mock peer stream getter
|
||||
var streamGetter peerStreamGetter
|
||||
|
||||
//the non-mock user CC stream establishment func
|
||||
func userChaincodeStreamGetter(name string) (ClientStream, error) {
|
||||
if *peerAddress == "" {
|
||||
return nil, errors.New("flag 'peer.address' must be set")
|
||||
}
|
||||
|
||||
conf, err := internal.LoadConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
conn, err := internal.NewClientConn(*peerAddress, conf.TLS, conf.KaOpts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return internal.NewRegisterClient(conn)
|
||||
}
|
||||
|
||||
// Start chaincodes
|
||||
func Start(cc Chaincode) error {
|
||||
flag.Parse()
|
||||
chaincodename := os.Getenv("CORE_CHAINCODE_ID_NAME")
|
||||
if chaincodename == "" {
|
||||
return errors.New("'CORE_CHAINCODE_ID_NAME' must be set")
|
||||
}
|
||||
|
||||
//mock stream not set up ... get real stream
|
||||
if streamGetter == nil {
|
||||
streamGetter = userChaincodeStreamGetter
|
||||
}
|
||||
|
||||
stream, err := streamGetter(chaincodename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = chaincodeAsClientChat(chaincodename, stream, cc)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// StartInProc is an entry point for system chaincodes bootstrap. It is not an
|
||||
// API for chaincodes.
|
||||
func StartInProc(chaincodename string, stream ClientStream, cc Chaincode) error {
|
||||
return chaincodeAsClientChat(chaincodename, stream, cc)
|
||||
}
|
||||
|
||||
// this is the chat stream resulting from the chaincode-as-client model where the chaincode initiates connection
|
||||
func chaincodeAsClientChat(chaincodename string, stream ClientStream, cc Chaincode) error {
|
||||
defer stream.CloseSend()
|
||||
return chatWithPeer(chaincodename, stream, cc)
|
||||
}
|
||||
|
||||
// chat stream for peer-chaincode interactions post connection
|
||||
func chatWithPeer(chaincodename string, stream PeerChaincodeStream, cc Chaincode) error {
|
||||
// Create the shim handler responsible for all control logic
|
||||
handler := newChaincodeHandler(stream, cc)
|
||||
|
||||
// Send the ChaincodeID during register.
|
||||
chaincodeID := &peerpb.ChaincodeID{Name: chaincodename}
|
||||
payload, err := proto.Marshal(chaincodeID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error marshalling chaincodeID during chaincode registration: %s", err)
|
||||
}
|
||||
|
||||
// Register on the stream
|
||||
if err = handler.serialSend(&peerpb.ChaincodeMessage{Type: peerpb.ChaincodeMessage_REGISTER, Payload: payload}); err != nil {
|
||||
return fmt.Errorf("error sending chaincode REGISTER: %s", err)
|
||||
|
||||
}
|
||||
|
||||
// holds return values from gRPC Recv below
|
||||
type recvMsg struct {
|
||||
msg *peerpb.ChaincodeMessage
|
||||
err error
|
||||
}
|
||||
msgAvail := make(chan *recvMsg, 1)
|
||||
errc := make(chan error)
|
||||
|
||||
receiveMessage := func() {
|
||||
in, err := stream.Recv()
|
||||
msgAvail <- &recvMsg{in, err}
|
||||
}
|
||||
|
||||
go receiveMessage()
|
||||
for {
|
||||
select {
|
||||
case rmsg := <-msgAvail:
|
||||
switch {
|
||||
case rmsg.err == io.EOF:
|
||||
return errors.New("received EOF, ending chaincode stream")
|
||||
case rmsg.err != nil:
|
||||
err := fmt.Errorf("receive failed: %s", rmsg.err)
|
||||
return err
|
||||
case rmsg.msg == nil:
|
||||
err := errors.New("received nil message, ending chaincode stream")
|
||||
return err
|
||||
default:
|
||||
err := handler.handleMessage(rmsg.msg, errc)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("error handling message: %s", err)
|
||||
return err
|
||||
}
|
||||
|
||||
go receiveMessage()
|
||||
}
|
||||
|
||||
case sendErr := <-errc:
|
||||
if sendErr != nil {
|
||||
err := fmt.Errorf("error sending: %s", sendErr)
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
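// Illustrative sketch (not part of the vendored file): the typical chaincode
// main() that hands control to Start above. SimpleChaincode is an assumed
// user-defined type implementing the Chaincode interface (Init/Invoke), and
// the peer.address flag plus CORE_CHAINCODE_ID_NAME must be set at launch.
//
//	func main() {
//		if err := shim.Start(new(SimpleChaincode)); err != nil {
//			fmt.Printf("error starting chaincode: %s\n", err)
//		}
//	}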
@ -0,0 +1,756 @@
|
||||
// Copyright the Hyperledger Fabric contributors. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package shim
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes/timestamp"
|
||||
"github.com/hyperledger/fabric-protos-go/common"
|
||||
"github.com/hyperledger/fabric-protos-go/ledger/queryresult"
|
||||
pb "github.com/hyperledger/fabric-protos-go/peer"
|
||||
)
|
||||
|
||||
// ChaincodeStub is an object passed to chaincode for shim side handling of
|
||||
// APIs.
|
||||
type ChaincodeStub struct {
|
||||
TxID string
|
||||
ChannelID string
|
||||
chaincodeEvent *pb.ChaincodeEvent
|
||||
args [][]byte
|
||||
handler *Handler
|
||||
signedProposal *pb.SignedProposal
|
||||
proposal *pb.Proposal
|
||||
validationParameterMetakey string
|
||||
|
||||
// Additional fields extracted from the signedProposal
|
||||
creator []byte
|
||||
transient map[string][]byte
|
||||
binding []byte
|
||||
|
||||
decorations map[string][]byte
|
||||
}
|
||||
|
||||
// ChaincodeInvocation functionality
|
||||
|
||||
func newChaincodeStub(handler *Handler, channelID, txid string, input *pb.ChaincodeInput, signedProposal *pb.SignedProposal) (*ChaincodeStub, error) {
|
||||
stub := &ChaincodeStub{
|
||||
TxID: txid,
|
||||
ChannelID: channelID,
|
||||
args: input.Args,
|
||||
handler: handler,
|
||||
signedProposal: signedProposal,
|
||||
decorations: input.Decorations,
|
||||
validationParameterMetakey: pb.MetaDataKeys_VALIDATION_PARAMETER.String(),
|
||||
}
|
||||
|
||||
// TODO: sanity check: verify that every call to init with a nil
|
||||
// signedProposal is a legitimate one, meaning it is an internal call
|
||||
// to system chaincodes.
|
||||
if signedProposal != nil {
|
||||
var err error
|
||||
|
||||
stub.proposal = &pb.Proposal{}
|
||||
err = proto.Unmarshal(signedProposal.ProposalBytes, stub.proposal)
|
||||
if err != nil {
|
||||
|
||||
return nil, fmt.Errorf("failed to extract Proposal from SignedProposal: %s", err)
|
||||
}
|
||||
|
||||
// check for header
|
||||
if len(stub.proposal.GetHeader()) == 0 {
|
||||
return nil, errors.New("failed to extract Proposal fields: proposal header is nil")
|
||||
}
|
||||
|
||||
// Extract creator, transient, binding...
|
||||
hdr := &common.Header{}
|
||||
if err := proto.Unmarshal(stub.proposal.GetHeader(), hdr); err != nil {
|
||||
return nil, fmt.Errorf("failed to extract proposal header: %s", err)
|
||||
}
|
||||
|
||||
// extract and validate channel header
|
||||
chdr := &common.ChannelHeader{}
|
||||
if err := proto.Unmarshal(hdr.ChannelHeader, chdr); err != nil {
|
||||
return nil, fmt.Errorf("failed to extract channel header: %s", err)
|
||||
}
|
||||
validTypes := map[common.HeaderType]bool{
|
||||
common.HeaderType_ENDORSER_TRANSACTION: true,
|
||||
common.HeaderType_CONFIG: true,
|
||||
}
|
||||
if !validTypes[common.HeaderType(chdr.GetType())] {
|
||||
return nil, fmt.Errorf(
|
||||
"invalid channel header type. Expected %s or %s, received %s",
|
||||
common.HeaderType_ENDORSER_TRANSACTION,
|
||||
common.HeaderType_CONFIG,
|
||||
common.HeaderType(chdr.GetType()),
|
||||
)
|
||||
}
|
||||
|
||||
// extract creator from signature header
|
||||
shdr := &common.SignatureHeader{}
|
||||
if err := proto.Unmarshal(hdr.GetSignatureHeader(), shdr); err != nil {
|
||||
return nil, fmt.Errorf("failed to extract signature header: %s", err)
|
||||
}
|
||||
stub.creator = shdr.GetCreator()
|
||||
|
||||
// extract transient data from the proposal payload
|
||||
payload := &pb.ChaincodeProposalPayload{}
|
||||
if err := proto.Unmarshal(stub.proposal.GetPayload(), payload); err != nil {
|
||||
return nil, fmt.Errorf("failed to extract proposal payload: %s", err)
|
||||
}
|
||||
stub.transient = payload.GetTransientMap()
|
||||
|
||||
// compute the proposal binding from the nonce, creator and epoch
|
||||
epoch := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(epoch, chdr.GetEpoch())
|
||||
digest := sha256.Sum256(append(append(shdr.GetNonce(), stub.creator...), epoch...))
|
||||
stub.binding = digest[:]
|
||||
|
||||
}
|
||||
|
||||
return stub, nil
|
||||
}
|
||||
|
||||
// GetTxID returns the transaction ID for the proposal
|
||||
func (s *ChaincodeStub) GetTxID() string {
|
||||
return s.TxID
|
||||
}
|
||||
|
||||
// GetChannelID returns the channel for the proposal
|
||||
func (s *ChaincodeStub) GetChannelID() string {
|
||||
return s.ChannelID
|
||||
}
|
||||
|
||||
// GetDecorations ...
|
||||
func (s *ChaincodeStub) GetDecorations() map[string][]byte {
|
||||
return s.decorations
|
||||
}
|
||||
|
||||
// GetMSPID returns the local mspid of the peer by checking the CORE_PEER_LOCALMSPID
|
||||
// env var and returns an error if the env var is not set
|
||||
func GetMSPID() (string, error) {
|
||||
mspid := os.Getenv("CORE_PEER_LOCALMSPID")
|
||||
|
||||
if mspid == "" {
|
||||
return "", errors.New("'CORE_PEER_LOCALMSPID' is not set")
|
||||
}
|
||||
|
||||
return mspid, nil
|
||||
}
|
||||
|
||||
// ------------- Call Chaincode functions ---------------
|
||||
|
||||
// InvokeChaincode documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) InvokeChaincode(chaincodeName string, args [][]byte, channel string) pb.Response {
|
||||
// Internally we handle chaincode name as a composite name
|
||||
if channel != "" {
|
||||
chaincodeName = chaincodeName + "/" + channel
|
||||
}
|
||||
return s.handler.handleInvokeChaincode(chaincodeName, args, s.ChannelID, s.TxID)
|
||||
}
|
||||
|
||||
// --------- State functions ----------
|
||||
|
||||
// GetState documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) GetState(key string) ([]byte, error) {
|
||||
// Access public data by setting the collection to empty string
|
||||
collection := ""
|
||||
return s.handler.handleGetState(collection, key, s.ChannelID, s.TxID)
|
||||
}
|
||||
|
||||
// SetStateValidationParameter documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) SetStateValidationParameter(key string, ep []byte) error {
|
||||
return s.handler.handlePutStateMetadataEntry("", key, s.validationParameterMetakey, ep, s.ChannelID, s.TxID)
|
||||
}
|
||||
|
||||
// GetStateValidationParameter documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) GetStateValidationParameter(key string) ([]byte, error) {
|
||||
md, err := s.handler.handleGetStateMetadata("", key, s.ChannelID, s.TxID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ep, ok := md[s.validationParameterMetakey]; ok {
|
||||
return ep, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// PutState documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) PutState(key string, value []byte) error {
|
||||
if key == "" {
|
||||
return errors.New("key must not be an empty string")
|
||||
}
|
||||
// Access public data by setting the collection to empty string
|
||||
collection := ""
|
||||
return s.handler.handlePutState(collection, key, value, s.ChannelID, s.TxID)
|
||||
}
|
||||
|
||||
func (s *ChaincodeStub) createStateQueryIterator(response *pb.QueryResponse) *StateQueryIterator {
|
||||
return &StateQueryIterator{
|
||||
CommonIterator: &CommonIterator{
|
||||
handler: s.handler,
|
||||
channelID: s.ChannelID,
|
||||
txid: s.TxID,
|
||||
response: response,
|
||||
currentLoc: 0,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// GetQueryResult documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) GetQueryResult(query string) (StateQueryIteratorInterface, error) {
|
||||
// Access public data by setting the collection to empty string
|
||||
collection := ""
|
||||
// ignore QueryResponseMetadata as it is not applicable for a rich query without pagination
|
||||
iterator, _, err := s.handleGetQueryResult(collection, query, nil)
|
||||
|
||||
return iterator, err
|
||||
}
|
||||
|
||||
// DelState documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) DelState(key string) error {
|
||||
// Access public data by setting the collection to empty string
|
||||
collection := ""
|
||||
return s.handler.handleDelState(collection, key, s.ChannelID, s.TxID)
|
||||
}
|
||||
|
||||
// --------- private state functions ---------
|
||||
|
||||
// GetPrivateData documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) GetPrivateData(collection string, key string) ([]byte, error) {
|
||||
if collection == "" {
|
||||
return nil, fmt.Errorf("collection must not be an empty string")
|
||||
}
|
||||
return s.handler.handleGetState(collection, key, s.ChannelID, s.TxID)
|
||||
}
|
||||
|
||||
// GetPrivateDataHash documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) GetPrivateDataHash(collection string, key string) ([]byte, error) {
|
||||
if collection == "" {
|
||||
return nil, fmt.Errorf("collection must not be an empty string")
|
||||
}
|
||||
return s.handler.handleGetPrivateDataHash(collection, key, s.ChannelID, s.TxID)
|
||||
}
|
||||
|
||||
// PutPrivateData documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) PutPrivateData(collection string, key string, value []byte) error {
|
||||
if collection == "" {
|
||||
return fmt.Errorf("collection must not be an empty string")
|
||||
}
|
||||
if key == "" {
|
||||
return fmt.Errorf("key must not be an empty string")
|
||||
}
|
||||
return s.handler.handlePutState(collection, key, value, s.ChannelID, s.TxID)
|
||||
}
|
||||
|
||||
// DelPrivateData documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) DelPrivateData(collection string, key string) error {
|
||||
if collection == "" {
|
||||
return fmt.Errorf("collection must not be an empty string")
|
||||
}
|
||||
return s.handler.handleDelState(collection, key, s.ChannelID, s.TxID)
|
||||
}
|
||||
|
||||
// GetPrivateDataByRange documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) GetPrivateDataByRange(collection, startKey, endKey string) (StateQueryIteratorInterface, error) {
|
||||
if collection == "" {
|
||||
return nil, fmt.Errorf("collection must not be an empty string")
|
||||
}
|
||||
if startKey == "" {
|
||||
startKey = emptyKeySubstitute
|
||||
}
|
||||
if err := validateSimpleKeys(startKey, endKey); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// ignore QueryResponseMetadata as it is not applicable for a range query without pagination
|
||||
iterator, _, err := s.handleGetStateByRange(collection, startKey, endKey, nil)
|
||||
|
||||
return iterator, err
|
||||
}
|
||||
|
||||
func (s *ChaincodeStub) createRangeKeysForPartialCompositeKey(objectType string, attributes []string) (string, string, error) {
|
||||
partialCompositeKey, err := s.CreateCompositeKey(objectType, attributes)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
startKey := partialCompositeKey
|
||||
endKey := partialCompositeKey + string(maxUnicodeRuneValue)
|
||||
|
||||
return startKey, endKey, nil
|
||||
}
|
||||
|
||||
// GetPrivateDataByPartialCompositeKey documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) GetPrivateDataByPartialCompositeKey(collection, objectType string, attributes []string) (StateQueryIteratorInterface, error) {
|
||||
if collection == "" {
|
||||
return nil, fmt.Errorf("collection must not be an empty string")
|
||||
}
|
||||
|
||||
startKey, endKey, err := s.createRangeKeysForPartialCompositeKey(objectType, attributes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// ignore QueryResponseMetadata as it is not applicable for a partial composite key query without pagination
|
||||
iterator, _, err := s.handleGetStateByRange(collection, startKey, endKey, nil)
|
||||
|
||||
return iterator, err
|
||||
}
|
||||
|
||||
// GetPrivateDataQueryResult documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) GetPrivateDataQueryResult(collection, query string) (StateQueryIteratorInterface, error) {
|
||||
if collection == "" {
|
||||
return nil, fmt.Errorf("collection must not be an empty string")
|
||||
}
|
||||
// ignore QueryResponseMetadata as it is not applicable for a range query without pagination
|
||||
iterator, _, err := s.handleGetQueryResult(collection, query, nil)
|
||||
|
||||
return iterator, err
|
||||
}
|
||||
|
||||
// GetPrivateDataValidationParameter documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) GetPrivateDataValidationParameter(collection, key string) ([]byte, error) {
|
||||
md, err := s.handler.handleGetStateMetadata(collection, key, s.ChannelID, s.TxID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ep, ok := md[s.validationParameterMetakey]; ok {
|
||||
return ep, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// SetPrivateDataValidationParameter documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) SetPrivateDataValidationParameter(collection, key string, ep []byte) error {
|
||||
return s.handler.handlePutStateMetadataEntry(collection, key, s.validationParameterMetakey, ep, s.ChannelID, s.TxID)
|
||||
}
|
||||
|
||||
// CommonIterator documentation can be found in interfaces.go
|
||||
type CommonIterator struct {
|
||||
handler *Handler
|
||||
channelID string
|
||||
txid string
|
||||
response *pb.QueryResponse
|
||||
currentLoc int
|
||||
}
|
||||
|
||||
// StateQueryIterator documentation can be found in interfaces.go
|
||||
type StateQueryIterator struct {
|
||||
*CommonIterator
|
||||
}
|
||||
|
||||
// HistoryQueryIterator documentation can be found in interfaces.go
|
||||
type HistoryQueryIterator struct {
|
||||
*CommonIterator
|
||||
}
|
||||
|
||||
// General interface for supporting different types of query results.
|
||||
// Actual types differ for different queries
|
||||
type queryResult interface{}
|
||||
|
||||
type resultType uint8
|
||||
|
||||
// TODO: Document constants
|
||||
/*
|
||||
Constants ...
|
||||
*/
|
||||
const (
|
||||
StateQueryResult resultType = iota + 1
|
||||
HistoryQueryResult
|
||||
)
|
||||
|
||||
func createQueryResponseMetadata(metadataBytes []byte) (*pb.QueryResponseMetadata, error) {
|
||||
metadata := &pb.QueryResponseMetadata{}
|
||||
err := proto.Unmarshal(metadataBytes, metadata)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return metadata, nil
|
||||
}
|
||||
|
||||
func (s *ChaincodeStub) handleGetStateByRange(collection, startKey, endKey string,
|
||||
metadata []byte) (StateQueryIteratorInterface, *pb.QueryResponseMetadata, error) {
|
||||
|
||||
response, err := s.handler.handleGetStateByRange(collection, startKey, endKey, metadata, s.ChannelID, s.TxID)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
iterator := s.createStateQueryIterator(response)
|
||||
responseMetadata, err := createQueryResponseMetadata(response.Metadata)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return iterator, responseMetadata, nil
|
||||
}
|
||||
|
||||
func (s *ChaincodeStub) handleGetQueryResult(collection, query string,
|
||||
metadata []byte) (StateQueryIteratorInterface, *pb.QueryResponseMetadata, error) {
|
||||
|
||||
response, err := s.handler.handleGetQueryResult(collection, query, metadata, s.ChannelID, s.TxID)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
iterator := s.createStateQueryIterator(response)
|
||||
responseMetadata, err := createQueryResponseMetadata(response.Metadata)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return iterator, responseMetadata, nil
|
||||
}
|
||||
|
||||
// GetStateByRange documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) GetStateByRange(startKey, endKey string) (StateQueryIteratorInterface, error) {
|
||||
if startKey == "" {
|
||||
startKey = emptyKeySubstitute
|
||||
}
|
||||
if err := validateSimpleKeys(startKey, endKey); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
collection := ""
|
||||
|
||||
// ignore QueryResponseMetadata as it is not applicable for a range query without pagination
|
||||
iterator, _, err := s.handleGetStateByRange(collection, startKey, endKey, nil)
|
||||
|
||||
return iterator, err
|
||||
}
|
||||
|
||||
// GetHistoryForKey documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) GetHistoryForKey(key string) (HistoryQueryIteratorInterface, error) {
|
||||
response, err := s.handler.handleGetHistoryForKey(key, s.ChannelID, s.TxID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &HistoryQueryIterator{CommonIterator: &CommonIterator{s.handler, s.ChannelID, s.TxID, response, 0}}, nil
|
||||
}
|
||||
|
||||
//CreateCompositeKey documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) CreateCompositeKey(objectType string, attributes []string) (string, error) {
|
||||
return CreateCompositeKey(objectType, attributes)
|
||||
}
|
||||
|
||||
//SplitCompositeKey documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) SplitCompositeKey(compositeKey string) (string, []string, error) {
|
||||
return splitCompositeKey(compositeKey)
|
||||
}
|
||||
|
||||
// CreateCompositeKey ...
|
||||
func CreateCompositeKey(objectType string, attributes []string) (string, error) {
|
||||
if err := validateCompositeKeyAttribute(objectType); err != nil {
|
||||
return "", err
|
||||
}
|
||||
ck := compositeKeyNamespace + objectType + string(minUnicodeRuneValue)
|
||||
for _, att := range attributes {
|
||||
if err := validateCompositeKeyAttribute(att); err != nil {
|
||||
return "", err
|
||||
}
|
||||
ck += att + string(minUnicodeRuneValue)
|
||||
}
|
||||
return ck, nil
|
||||
}
|
||||
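// Illustrative sketch (not part of the vendored file): creating and splitting
// a composite key from chaincode via the stub methods that delegate to the
// helpers above. The object type and attribute values are assumptions.
//
//	ck, err := stub.CreateCompositeKey("owner~asset", []string{"alice", "asset1"})
//	if err != nil {
//		return shim.Error(err.Error())
//	}
//	if err := stub.PutState(ck, []byte{0x00}); err != nil { // marker value for the index entry
//		return shim.Error(err.Error())
//	}
//	objectType, attrs, err := stub.SplitCompositeKey(ck)
//	if err != nil {
//		return shim.Error(err.Error())
//	}
//	// objectType == "owner~asset", attrs == ["alice", "asset1"]
//	_, _ = objectType, attrs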
|
||||
func splitCompositeKey(compositeKey string) (string, []string, error) {
|
||||
componentIndex := 1
|
||||
components := []string{}
|
||||
for i := 1; i < len(compositeKey); i++ {
|
||||
if compositeKey[i] == minUnicodeRuneValue {
|
||||
components = append(components, compositeKey[componentIndex:i])
|
||||
componentIndex = i + 1
|
||||
}
|
||||
}
|
||||
return components[0], components[1:], nil
|
||||
}
|
||||
|
||||
func validateCompositeKeyAttribute(str string) error {
|
||||
if !utf8.ValidString(str) {
|
||||
return fmt.Errorf("not a valid utf8 string: [%x]", str)
|
||||
}
|
||||
for index, runeValue := range str {
|
||||
if runeValue == minUnicodeRuneValue || runeValue == maxUnicodeRuneValue {
|
||||
return fmt.Errorf(`input contains unicode %#U starting at position [%d]. %#U and %#U are not allowed in the input attribute of a composite key`,
|
||||
runeValue, index, minUnicodeRuneValue, maxUnicodeRuneValue)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//To ensure that simple keys do not go into composite key namespace,
|
||||
//we validate each simple key to check whether it starts with 0x00 (which
|
||||
//is the namespace for compositeKey). This helps in avoiding simple/composite
|
||||
//key collisions.
|
||||
func validateSimpleKeys(simpleKeys ...string) error {
|
||||
for _, key := range simpleKeys {
|
||||
if len(key) > 0 && key[0] == compositeKeyNamespace[0] {
|
||||
return fmt.Errorf(`first character of the key [%s] contains a null character which is not allowed`, key)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//GetStateByPartialCompositeKey function can be invoked by a chaincode to query the
|
||||
//state based on a given partial composite key. This function returns an
|
||||
//iterator which can be used to iterate over all composite keys whose prefix
|
||||
//matches the given partial composite key. This function should be used only for
|
||||
//a partial composite key. For a full composite key, an iterator with an empty response
|
||||
//would be returned.
|
||||
func (s *ChaincodeStub) GetStateByPartialCompositeKey(objectType string, attributes []string) (StateQueryIteratorInterface, error) {
|
||||
collection := ""
|
||||
startKey, endKey, err := s.createRangeKeysForPartialCompositeKey(objectType, attributes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// ignore QueryResponseMetadata as it is not applicable for a partial composite key query without pagination
|
||||
iterator, _, err := s.handleGetStateByRange(collection, startKey, endKey, nil)
|
||||
|
||||
return iterator, err
|
||||
}
|
||||
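// Illustrative sketch (not part of the vendored file): using the partial
// composite key query above to find every asset indexed under one owner.
// The object type and attribute values are assumptions.
//
//	iter, err := stub.GetStateByPartialCompositeKey("owner~asset", []string{"alice"})
//	if err != nil {
//		return shim.Error(err.Error())
//	}
//	defer iter.Close()
//	for iter.HasNext() {
//		kv, err := iter.Next()
//		if err != nil {
//			return shim.Error(err.Error())
//		}
//		_, attrs, err := stub.SplitCompositeKey(kv.Key)
//		if err != nil {
//			return shim.Error(err.Error())
//		}
//		_ = attrs // attrs[1] would be the asset ID for this owner
//	}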
|
||||
func createQueryMetadata(pageSize int32, bookmark string) ([]byte, error) {
|
||||
// Construct the QueryMetadata with a page size and a bookmark needed for pagination
|
||||
metadata := &pb.QueryMetadata{PageSize: pageSize, Bookmark: bookmark}
|
||||
metadataBytes, err := proto.Marshal(metadata)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return metadataBytes, nil
|
||||
}
|
||||
|
||||
// GetStateByRangeWithPagination ...
|
||||
func (s *ChaincodeStub) GetStateByRangeWithPagination(startKey, endKey string, pageSize int32,
|
||||
bookmark string) (StateQueryIteratorInterface, *pb.QueryResponseMetadata, error) {
|
||||
|
||||
if startKey == "" {
|
||||
startKey = emptyKeySubstitute
|
||||
}
|
||||
if err := validateSimpleKeys(startKey, endKey); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
collection := ""
|
||||
|
||||
metadata, err := createQueryMetadata(pageSize, bookmark)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return s.handleGetStateByRange(collection, startKey, endKey, metadata)
|
||||
}
|
||||
|
||||
// GetStateByPartialCompositeKeyWithPagination ...
|
||||
func (s *ChaincodeStub) GetStateByPartialCompositeKeyWithPagination(objectType string, keys []string,
|
||||
pageSize int32, bookmark string) (StateQueryIteratorInterface, *pb.QueryResponseMetadata, error) {
|
||||
|
||||
collection := ""
|
||||
|
||||
metadata, err := createQueryMetadata(pageSize, bookmark)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
startKey, endKey, err := s.createRangeKeysForPartialCompositeKey(objectType, keys)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return s.handleGetStateByRange(collection, startKey, endKey, metadata)
|
||||
}
|
||||
|
||||
// GetQueryResultWithPagination ...
|
||||
func (s *ChaincodeStub) GetQueryResultWithPagination(query string, pageSize int32,
|
||||
bookmark string) (StateQueryIteratorInterface, *pb.QueryResponseMetadata, error) {
|
||||
// Access public data by setting the collection to empty string
|
||||
collection := ""
|
||||
|
||||
metadata, err := createQueryMetadata(pageSize, bookmark)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return s.handleGetQueryResult(collection, query, metadata)
|
||||
}
|
||||
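// Illustrative sketch (not part of the vendored file): paging through a large
// result set with the *WithPagination variants above. The page size and the
// CouchDB selector are assumptions; an empty bookmark starts from the
// beginning, and pagination is intended for read-only transactions.
//
//	bookmark := ""
//	for {
//		iter, meta, err := stub.GetQueryResultWithPagination(
//			`{"selector":{"docType":"asset"}}`, 25, bookmark)
//		if err != nil {
//			return shim.Error(err.Error())
//		}
//		for iter.HasNext() {
//			kv, err := iter.Next()
//			if err != nil {
//				iter.Close()
//				return shim.Error(err.Error())
//			}
//			_ = kv // process one entry of the current page
//		}
//		iter.Close()
//		if meta.FetchedRecordsCount < 25 { // short page => no more results
//			break
//		}
//		bookmark = meta.Bookmark // resume from here on the next page
//	}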
|
||||
// Next ...
|
||||
func (iter *StateQueryIterator) Next() (*queryresult.KV, error) {
|
||||
result, err := iter.nextResult(StateQueryResult)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result.(*queryresult.KV), err
|
||||
}
|
||||
|
||||
// Next ...
|
||||
func (iter *HistoryQueryIterator) Next() (*queryresult.KeyModification, error) {
|
||||
result, err := iter.nextResult(HistoryQueryResult)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result.(*queryresult.KeyModification), err
|
||||
}
|
||||
|
||||
// HasNext documentation can be found in interfaces.go
|
||||
func (iter *CommonIterator) HasNext() bool {
|
||||
if iter.currentLoc < len(iter.response.Results) || iter.response.HasMore {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// getResultFromBytes deserializes a QueryResult and returns either a KV struct
|
||||
// or KeyModification depending on the result type (i.e., state (range/execute)
|
||||
// query, history query). Note that queryResult is an empty golang
|
||||
// interface that can hold values of any type.
|
||||
func (iter *CommonIterator) getResultFromBytes(queryResultBytes *pb.QueryResultBytes,
|
||||
rType resultType) (queryResult, error) {
|
||||
|
||||
if rType == StateQueryResult {
|
||||
stateQueryResult := &queryresult.KV{}
|
||||
if err := proto.Unmarshal(queryResultBytes.ResultBytes, stateQueryResult); err != nil {
|
||||
return nil, fmt.Errorf("error unmarshaling result from bytes: %s", err)
|
||||
}
|
||||
return stateQueryResult, nil
|
||||
|
||||
} else if rType == HistoryQueryResult {
|
||||
historyQueryResult := &queryresult.KeyModification{}
|
||||
if err := proto.Unmarshal(queryResultBytes.ResultBytes, historyQueryResult); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return historyQueryResult, nil
|
||||
}
|
||||
return nil, errors.New("wrong result type")
|
||||
}
|
||||
|
||||
func (iter *CommonIterator) fetchNextQueryResult() error {
|
||||
response, err := iter.handler.handleQueryStateNext(iter.response.Id, iter.channelID, iter.txid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
iter.currentLoc = 0
|
||||
iter.response = response
|
||||
return nil
|
||||
}
|
||||
|
||||
// nextResult returns the next QueryResult (i.e., either a KV struct or KeyModification)
|
||||
// from the state or history query iterator. Note that queryResult is an
|
||||
// empty golang interface that can hold values of any type.
|
||||
func (iter *CommonIterator) nextResult(rType resultType) (queryResult, error) {
|
||||
if iter.currentLoc < len(iter.response.Results) {
|
||||
// On valid access of an element from cached results
|
||||
queryResult, err := iter.getResultFromBytes(iter.response.Results[iter.currentLoc], rType)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
iter.currentLoc++
|
||||
|
||||
if iter.currentLoc == len(iter.response.Results) && iter.response.HasMore {
|
||||
// On access of last item, pre-fetch to update HasMore flag
|
||||
if err = iter.fetchNextQueryResult(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return queryResult, err
|
||||
} else if !iter.response.HasMore {
|
||||
// On call to Next() without check of HasMore
|
||||
return nil, errors.New("no such key")
|
||||
}
|
||||
|
||||
// should not fall through here
|
||||
// case: no cached results but HasMore is true.
|
||||
return nil, errors.New("invalid iterator state")
|
||||
}
|
||||
|
||||
// Close documentation can be found in interfaces.go
|
||||
func (iter *CommonIterator) Close() error {
|
||||
_, err := iter.handler.handleQueryStateClose(iter.response.Id, iter.channelID, iter.txid)
|
||||
return err
|
||||
}
|
||||
|
||||
// GetArgs documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) GetArgs() [][]byte {
|
||||
return s.args
|
||||
}
|
||||
|
||||
// GetStringArgs documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) GetStringArgs() []string {
|
||||
args := s.GetArgs()
|
||||
strargs := make([]string, 0, len(args))
|
||||
for _, barg := range args {
|
||||
strargs = append(strargs, string(barg))
|
||||
}
|
||||
return strargs
|
||||
}
|
||||
|
||||
// GetFunctionAndParameters documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) GetFunctionAndParameters() (function string, params []string) {
|
||||
allargs := s.GetStringArgs()
|
||||
function = ""
|
||||
params = []string{}
|
||||
if len(allargs) >= 1 {
|
||||
function = allargs[0]
|
||||
params = allargs[1:]
|
||||
}
|
||||
return
|
||||
}
|
||||
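// Illustrative sketch (not part of the vendored file): the common Invoke
// dispatch pattern built on GetFunctionAndParameters above. The receiver type
// and function names ("put", "get") are assumptions.
//
//	func (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response {
//		fn, args := stub.GetFunctionAndParameters()
//		switch fn {
//		case "put":
//			if len(args) != 2 {
//				return shim.Error("expected <key> <value>")
//			}
//			if err := stub.PutState(args[0], []byte(args[1])); err != nil {
//				return shim.Error(err.Error())
//			}
//			return shim.Success(nil)
//		case "get":
//			if len(args) != 1 {
//				return shim.Error("expected <key>")
//			}
//			value, err := stub.GetState(args[0])
//			if err != nil {
//				return shim.Error(err.Error())
//			}
//			return shim.Success(value)
//		default:
//			return shim.Error("unknown function: " + fn)
//		}
//	}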
|
||||
// GetCreator documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) GetCreator() ([]byte, error) {
|
||||
return s.creator, nil
|
||||
}
|
||||
|
||||
// GetTransient documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) GetTransient() (map[string][]byte, error) {
|
||||
return s.transient, nil
|
||||
}
|
||||
|
||||
// GetBinding documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) GetBinding() ([]byte, error) {
|
||||
return s.binding, nil
|
||||
}
|
||||
|
||||
// GetSignedProposal documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) GetSignedProposal() (*pb.SignedProposal, error) {
|
||||
return s.signedProposal, nil
|
||||
}
|
||||
|
||||
// GetArgsSlice documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) GetArgsSlice() ([]byte, error) {
|
||||
args := s.GetArgs()
|
||||
res := []byte{}
|
||||
for _, barg := range args {
|
||||
res = append(res, barg...)
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// GetTxTimestamp documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) GetTxTimestamp() (*timestamp.Timestamp, error) {
|
||||
hdr := &common.Header{}
|
||||
if err := proto.Unmarshal(s.proposal.Header, hdr); err != nil {
|
||||
return nil, fmt.Errorf("error unmarshaling Header: %s", err)
|
||||
}
|
||||
|
||||
chdr := &common.ChannelHeader{}
|
||||
if err := proto.Unmarshal(hdr.ChannelHeader, chdr); err != nil {
|
||||
return nil, fmt.Errorf("error unmarshaling ChannelHeader: %s", err)
|
||||
}
|
||||
|
||||
return chdr.GetTimestamp(), nil
|
||||
}
|
||||
|
||||
// ------------- ChaincodeEvent API ----------------------
|
||||
|
||||
// SetEvent documentation can be found in interfaces.go
|
||||
func (s *ChaincodeStub) SetEvent(name string, payload []byte) error {
|
||||
if name == "" {
|
||||
return errors.New("event name can not be empty string")
|
||||
}
|
||||
s.chaincodeEvent = &pb.ChaincodeEvent{EventName: name, Payload: payload}
|
||||
return nil
|
||||
}
|
@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
371 chaincode/vendor/github.com/hyperledger/fabric-protos-go/common/collection.pb.go (generated, vendored)
@ -0,0 +1,925 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: common/common.proto

package common

import (
    fmt "fmt"
    proto "github.com/golang/protobuf/proto"
    timestamp "github.com/golang/protobuf/ptypes/timestamp"
    math "math"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package

// These status codes are intended to resemble selected HTTP status codes
type Status int32

const (
    Status_UNKNOWN Status = 0
    Status_SUCCESS Status = 200
    Status_BAD_REQUEST Status = 400
    Status_FORBIDDEN Status = 403
    Status_NOT_FOUND Status = 404
    Status_REQUEST_ENTITY_TOO_LARGE Status = 413
    Status_INTERNAL_SERVER_ERROR Status = 500
    Status_NOT_IMPLEMENTED Status = 501
    Status_SERVICE_UNAVAILABLE Status = 503
)

var Status_name = map[int32]string{
    0:   "UNKNOWN",
    200: "SUCCESS",
    400: "BAD_REQUEST",
    403: "FORBIDDEN",
    404: "NOT_FOUND",
    413: "REQUEST_ENTITY_TOO_LARGE",
    500: "INTERNAL_SERVER_ERROR",
    501: "NOT_IMPLEMENTED",
    503: "SERVICE_UNAVAILABLE",
}

var Status_value = map[string]int32{
    "UNKNOWN":                  0,
    "SUCCESS":                  200,
    "BAD_REQUEST":              400,
    "FORBIDDEN":                403,
    "NOT_FOUND":                404,
    "REQUEST_ENTITY_TOO_LARGE": 413,
    "INTERNAL_SERVER_ERROR":    500,
    "NOT_IMPLEMENTED":          501,
    "SERVICE_UNAVAILABLE":      503,
}

func (x Status) String() string {
    return proto.EnumName(Status_name, int32(x))
}

func (Status) EnumDescriptor() ([]byte, []int) {
    return fileDescriptor_8f954d82c0b891f6, []int{0}
}

type HeaderType int32

const (
    HeaderType_MESSAGE HeaderType = 0
    HeaderType_CONFIG HeaderType = 1
    HeaderType_CONFIG_UPDATE HeaderType = 2
    HeaderType_ENDORSER_TRANSACTION HeaderType = 3
    HeaderType_ORDERER_TRANSACTION HeaderType = 4
    HeaderType_DELIVER_SEEK_INFO HeaderType = 5
    HeaderType_CHAINCODE_PACKAGE HeaderType = 6
)

var HeaderType_name = map[int32]string{
    0: "MESSAGE",
    1: "CONFIG",
    2: "CONFIG_UPDATE",
    3: "ENDORSER_TRANSACTION",
    4: "ORDERER_TRANSACTION",
    5: "DELIVER_SEEK_INFO",
    6: "CHAINCODE_PACKAGE",
}

var HeaderType_value = map[string]int32{
    "MESSAGE":              0,
    "CONFIG":               1,
    "CONFIG_UPDATE":        2,
    "ENDORSER_TRANSACTION": 3,
    "ORDERER_TRANSACTION":  4,
    "DELIVER_SEEK_INFO":    5,
    "CHAINCODE_PACKAGE":    6,
}

func (x HeaderType) String() string {
    return proto.EnumName(HeaderType_name, int32(x))
}

func (HeaderType) EnumDescriptor() ([]byte, []int) {
    return fileDescriptor_8f954d82c0b891f6, []int{1}
}

// This enum enlists indexes of the block metadata array
type BlockMetadataIndex int32

const (
    BlockMetadataIndex_SIGNATURES BlockMetadataIndex = 0
    BlockMetadataIndex_LAST_CONFIG BlockMetadataIndex = 1 // Deprecated: Do not use.
    BlockMetadataIndex_TRANSACTIONS_FILTER BlockMetadataIndex = 2
    BlockMetadataIndex_ORDERER BlockMetadataIndex = 3 // Deprecated: Do not use.
    BlockMetadataIndex_COMMIT_HASH BlockMetadataIndex = 4
)

var BlockMetadataIndex_name = map[int32]string{
    0: "SIGNATURES",
    1: "LAST_CONFIG",
    2: "TRANSACTIONS_FILTER",
    3: "ORDERER",
    4: "COMMIT_HASH",
}

var BlockMetadataIndex_value = map[string]int32{
    "SIGNATURES":          0,
    "LAST_CONFIG":         1,
    "TRANSACTIONS_FILTER": 2,
    "ORDERER":             3,
    "COMMIT_HASH":         4,
}

func (x BlockMetadataIndex) String() string {
    return proto.EnumName(BlockMetadataIndex_name, int32(x))
}

func (BlockMetadataIndex) EnumDescriptor() ([]byte, []int) {
    return fileDescriptor_8f954d82c0b891f6, []int{2}
}

// LastConfig is the encoded value for the Metadata message which is encoded in the LAST_CONFIGURATION block metadata index
type LastConfig struct {
    Index uint64 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

func (m *LastConfig) Reset()         { *m = LastConfig{} }
func (m *LastConfig) String() string { return proto.CompactTextString(m) }
func (*LastConfig) ProtoMessage()    {}
func (*LastConfig) Descriptor() ([]byte, []int) {
    return fileDescriptor_8f954d82c0b891f6, []int{0}
}

func (m *LastConfig) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_LastConfig.Unmarshal(m, b)
}
func (m *LastConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_LastConfig.Marshal(b, m, deterministic)
}
func (m *LastConfig) XXX_Merge(src proto.Message) {
    xxx_messageInfo_LastConfig.Merge(m, src)
}
func (m *LastConfig) XXX_Size() int {
    return xxx_messageInfo_LastConfig.Size(m)
}
func (m *LastConfig) XXX_DiscardUnknown() {
    xxx_messageInfo_LastConfig.DiscardUnknown(m)
}

var xxx_messageInfo_LastConfig proto.InternalMessageInfo

func (m *LastConfig) GetIndex() uint64 {
    if m != nil {
        return m.Index
    }
    return 0
}

// Metadata is a common structure to be used to encode block metadata
type Metadata struct {
    Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
    Signatures []*MetadataSignature `protobuf:"bytes,2,rep,name=signatures,proto3" json:"signatures,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

func (m *Metadata) Reset()         { *m = Metadata{} }
func (m *Metadata) String() string { return proto.CompactTextString(m) }
func (*Metadata) ProtoMessage()    {}
func (*Metadata) Descriptor() ([]byte, []int) {
    return fileDescriptor_8f954d82c0b891f6, []int{1}
}

func (m *Metadata) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_Metadata.Unmarshal(m, b)
}
func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
}
func (m *Metadata) XXX_Merge(src proto.Message) {
    xxx_messageInfo_Metadata.Merge(m, src)
}
func (m *Metadata) XXX_Size() int {
    return xxx_messageInfo_Metadata.Size(m)
}
func (m *Metadata) XXX_DiscardUnknown() {
    xxx_messageInfo_Metadata.DiscardUnknown(m)
}

var xxx_messageInfo_Metadata proto.InternalMessageInfo

func (m *Metadata) GetValue() []byte {
    if m != nil {
        return m.Value
    }
    return nil
}

func (m *Metadata) GetSignatures() []*MetadataSignature {
    if m != nil {
        return m.Signatures
    }
    return nil
}

type MetadataSignature struct {
    SignatureHeader []byte `protobuf:"bytes,1,opt,name=signature_header,json=signatureHeader,proto3" json:"signature_header,omitempty"`
    Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

func (m *MetadataSignature) Reset()         { *m = MetadataSignature{} }
func (m *MetadataSignature) String() string { return proto.CompactTextString(m) }
func (*MetadataSignature) ProtoMessage()    {}
func (*MetadataSignature) Descriptor() ([]byte, []int) {
    return fileDescriptor_8f954d82c0b891f6, []int{2}
}

func (m *MetadataSignature) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_MetadataSignature.Unmarshal(m, b)
}
func (m *MetadataSignature) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_MetadataSignature.Marshal(b, m, deterministic)
}
func (m *MetadataSignature) XXX_Merge(src proto.Message) {
    xxx_messageInfo_MetadataSignature.Merge(m, src)
}
func (m *MetadataSignature) XXX_Size() int {
    return xxx_messageInfo_MetadataSignature.Size(m)
}
func (m *MetadataSignature) XXX_DiscardUnknown() {
    xxx_messageInfo_MetadataSignature.DiscardUnknown(m)
}

var xxx_messageInfo_MetadataSignature proto.InternalMessageInfo

func (m *MetadataSignature) GetSignatureHeader() []byte {
    if m != nil {
        return m.SignatureHeader
    }
    return nil
}

func (m *MetadataSignature) GetSignature() []byte {
    if m != nil {
        return m.Signature
    }
    return nil
}

type Header struct {
    ChannelHeader []byte `protobuf:"bytes,1,opt,name=channel_header,json=channelHeader,proto3" json:"channel_header,omitempty"`
    SignatureHeader []byte `protobuf:"bytes,2,opt,name=signature_header,json=signatureHeader,proto3" json:"signature_header,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

func (m *Header) Reset()         { *m = Header{} }
func (m *Header) String() string { return proto.CompactTextString(m) }
func (*Header) ProtoMessage()    {}
func (*Header) Descriptor() ([]byte, []int) {
    return fileDescriptor_8f954d82c0b891f6, []int{3}
}

func (m *Header) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_Header.Unmarshal(m, b)
}
func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_Header.Marshal(b, m, deterministic)
}
func (m *Header) XXX_Merge(src proto.Message) {
    xxx_messageInfo_Header.Merge(m, src)
}
func (m *Header) XXX_Size() int {
    return xxx_messageInfo_Header.Size(m)
}
func (m *Header) XXX_DiscardUnknown() {
    xxx_messageInfo_Header.DiscardUnknown(m)
}

var xxx_messageInfo_Header proto.InternalMessageInfo

func (m *Header) GetChannelHeader() []byte {
    if m != nil {
        return m.ChannelHeader
    }
    return nil
}

func (m *Header) GetSignatureHeader() []byte {
    if m != nil {
        return m.SignatureHeader
    }
    return nil
}

// Header is a generic replay prevention and identity message to include in a signed payload
type ChannelHeader struct {
    Type int32 `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"`
    // Version indicates message protocol version
    Version int32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
    // Timestamp is the local time when the message was created
    // by the sender
    Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
    // Identifier of the channel this message is bound for
    ChannelId string `protobuf:"bytes,4,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"`
    // An unique identifier that is used end-to-end.
    // - set by higher layers such as end user or SDK
    // - passed to the endorser (which will check for uniqueness)
    // - as the header is passed along unchanged, it will be
    //   be retrieved by the committer (uniqueness check here as well)
    // - to be stored in the ledger
    TxId string `protobuf:"bytes,5,opt,name=tx_id,json=txId,proto3" json:"tx_id,omitempty"`
    // The epoch in which this header was generated, where epoch is defined based on block height
    // Epoch in which the response has been generated. This field identifies a
    // logical window of time. A proposal response is accepted by a peer only if
    // two conditions hold:
    // 1. the epoch specified in the message is the current epoch
    // 2. this message has been only seen once during this epoch (i.e. it hasn't
    //    been replayed)
    Epoch uint64 `protobuf:"varint,6,opt,name=epoch,proto3" json:"epoch,omitempty"`
    // Extension that may be attached based on the header type
    Extension []byte `protobuf:"bytes,7,opt,name=extension,proto3" json:"extension,omitempty"`
    // If mutual TLS is employed, this represents
    // the hash of the client's TLS certificate
    TlsCertHash []byte `protobuf:"bytes,8,opt,name=tls_cert_hash,json=tlsCertHash,proto3" json:"tls_cert_hash,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

func (m *ChannelHeader) Reset()         { *m = ChannelHeader{} }
func (m *ChannelHeader) String() string { return proto.CompactTextString(m) }
func (*ChannelHeader) ProtoMessage()    {}
func (*ChannelHeader) Descriptor() ([]byte, []int) {
    return fileDescriptor_8f954d82c0b891f6, []int{4}
}

func (m *ChannelHeader) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_ChannelHeader.Unmarshal(m, b)
}
func (m *ChannelHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_ChannelHeader.Marshal(b, m, deterministic)
}
func (m *ChannelHeader) XXX_Merge(src proto.Message) {
    xxx_messageInfo_ChannelHeader.Merge(m, src)
}
func (m *ChannelHeader) XXX_Size() int {
    return xxx_messageInfo_ChannelHeader.Size(m)
}
func (m *ChannelHeader) XXX_DiscardUnknown() {
    xxx_messageInfo_ChannelHeader.DiscardUnknown(m)
}

var xxx_messageInfo_ChannelHeader proto.InternalMessageInfo

func (m *ChannelHeader) GetType() int32 {
    if m != nil {
        return m.Type
    }
    return 0
}

func (m *ChannelHeader) GetVersion() int32 {
    if m != nil {
        return m.Version
    }
    return 0
}

func (m *ChannelHeader) GetTimestamp() *timestamp.Timestamp {
    if m != nil {
        return m.Timestamp
    }
    return nil
}

func (m *ChannelHeader) GetChannelId() string {
    if m != nil {
        return m.ChannelId
    }
    return ""
}

func (m *ChannelHeader) GetTxId() string {
    if m != nil {
        return m.TxId
    }
    return ""
}

func (m *ChannelHeader) GetEpoch() uint64 {
    if m != nil {
        return m.Epoch
    }
    return 0
}

func (m *ChannelHeader) GetExtension() []byte {
    if m != nil {
        return m.Extension
    }
    return nil
}

func (m *ChannelHeader) GetTlsCertHash() []byte {
    if m != nil {
        return m.TlsCertHash
    }
    return nil
}

type SignatureHeader struct {
    // Creator of the message, a marshaled msp.SerializedIdentity
    Creator []byte `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"`
    // Arbitrary number that may only be used once. Can be used to detect replay attacks.
    Nonce []byte `protobuf:"bytes,2,opt,name=nonce,proto3" json:"nonce,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

func (m *SignatureHeader) Reset()         { *m = SignatureHeader{} }
func (m *SignatureHeader) String() string { return proto.CompactTextString(m) }
func (*SignatureHeader) ProtoMessage()    {}
func (*SignatureHeader) Descriptor() ([]byte, []int) {
    return fileDescriptor_8f954d82c0b891f6, []int{5}
}

func (m *SignatureHeader) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_SignatureHeader.Unmarshal(m, b)
}
func (m *SignatureHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_SignatureHeader.Marshal(b, m, deterministic)
}
func (m *SignatureHeader) XXX_Merge(src proto.Message) {
    xxx_messageInfo_SignatureHeader.Merge(m, src)
}
func (m *SignatureHeader) XXX_Size() int {
    return xxx_messageInfo_SignatureHeader.Size(m)
}
func (m *SignatureHeader) XXX_DiscardUnknown() {
    xxx_messageInfo_SignatureHeader.DiscardUnknown(m)
}

var xxx_messageInfo_SignatureHeader proto.InternalMessageInfo

func (m *SignatureHeader) GetCreator() []byte {
    if m != nil {
        return m.Creator
    }
    return nil
}

func (m *SignatureHeader) GetNonce() []byte {
    if m != nil {
        return m.Nonce
    }
    return nil
}

// Payload is the message contents (and header to allow for signing)
type Payload struct {
    // Header is included to provide identity and prevent replay
    Header *Header `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
    // Data, the encoding of which is defined by the type in the header
    Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

func (m *Payload) Reset()         { *m = Payload{} }
func (m *Payload) String() string { return proto.CompactTextString(m) }
func (*Payload) ProtoMessage()    {}
func (*Payload) Descriptor() ([]byte, []int) {
    return fileDescriptor_8f954d82c0b891f6, []int{6}
}

func (m *Payload) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_Payload.Unmarshal(m, b)
}
func (m *Payload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_Payload.Marshal(b, m, deterministic)
}
func (m *Payload) XXX_Merge(src proto.Message) {
    xxx_messageInfo_Payload.Merge(m, src)
}
func (m *Payload) XXX_Size() int {
    return xxx_messageInfo_Payload.Size(m)
}
func (m *Payload) XXX_DiscardUnknown() {
    xxx_messageInfo_Payload.DiscardUnknown(m)
}

var xxx_messageInfo_Payload proto.InternalMessageInfo

func (m *Payload) GetHeader() *Header {
    if m != nil {
        return m.Header
    }
    return nil
}

func (m *Payload) GetData() []byte {
    if m != nil {
        return m.Data
    }
    return nil
}

// Envelope wraps a Payload with a signature so that the message may be authenticated
type Envelope struct {
    // A marshaled Payload
    Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"`
    // A signature by the creator specified in the Payload header
    Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

func (m *Envelope) Reset()         { *m = Envelope{} }
func (m *Envelope) String() string { return proto.CompactTextString(m) }
func (*Envelope) ProtoMessage()    {}
func (*Envelope) Descriptor() ([]byte, []int) {
    return fileDescriptor_8f954d82c0b891f6, []int{7}
}

func (m *Envelope) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_Envelope.Unmarshal(m, b)
}
func (m *Envelope) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_Envelope.Marshal(b, m, deterministic)
}
func (m *Envelope) XXX_Merge(src proto.Message) {
    xxx_messageInfo_Envelope.Merge(m, src)
}
func (m *Envelope) XXX_Size() int {
    return xxx_messageInfo_Envelope.Size(m)
}
func (m *Envelope) XXX_DiscardUnknown() {
    xxx_messageInfo_Envelope.DiscardUnknown(m)
}

var xxx_messageInfo_Envelope proto.InternalMessageInfo

func (m *Envelope) GetPayload() []byte {
    if m != nil {
        return m.Payload
    }
    return nil
}

func (m *Envelope) GetSignature() []byte {
    if m != nil {
        return m.Signature
    }
    return nil
}

// This is finalized block structure to be shared among the orderer and peer
// Note that the BlockHeader chains to the previous BlockHeader, and the BlockData hash is embedded
// in the BlockHeader. This makes it natural and obvious that the Data is included in the hash, but
// the Metadata is not.
type Block struct {
    Header *BlockHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
    Data *BlockData `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
    Metadata *BlockMetadata `protobuf:"bytes,3,opt,name=metadata,proto3" json:"metadata,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

func (m *Block) Reset()         { *m = Block{} }
func (m *Block) String() string { return proto.CompactTextString(m) }
func (*Block) ProtoMessage()    {}
func (*Block) Descriptor() ([]byte, []int) {
    return fileDescriptor_8f954d82c0b891f6, []int{8}
}

func (m *Block) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_Block.Unmarshal(m, b)
}
func (m *Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_Block.Marshal(b, m, deterministic)
}
func (m *Block) XXX_Merge(src proto.Message) {
    xxx_messageInfo_Block.Merge(m, src)
}
func (m *Block) XXX_Size() int {
    return xxx_messageInfo_Block.Size(m)
}
func (m *Block) XXX_DiscardUnknown() {
    xxx_messageInfo_Block.DiscardUnknown(m)
}

var xxx_messageInfo_Block proto.InternalMessageInfo

func (m *Block) GetHeader() *BlockHeader {
    if m != nil {
        return m.Header
    }
    return nil
}

func (m *Block) GetData() *BlockData {
    if m != nil {
        return m.Data
    }
    return nil
}

func (m *Block) GetMetadata() *BlockMetadata {
    if m != nil {
        return m.Metadata
    }
    return nil
}

// BlockHeader is the element of the block which forms the block chain
// The block header is hashed using the configured chain hashing algorithm
// over the ASN.1 encoding of the BlockHeader
type BlockHeader struct {
    Number uint64 `protobuf:"varint,1,opt,name=number,proto3" json:"number,omitempty"`
    PreviousHash []byte `protobuf:"bytes,2,opt,name=previous_hash,json=previousHash,proto3" json:"previous_hash,omitempty"`
    DataHash []byte `protobuf:"bytes,3,opt,name=data_hash,json=dataHash,proto3" json:"data_hash,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

func (m *BlockHeader) Reset()         { *m = BlockHeader{} }
func (m *BlockHeader) String() string { return proto.CompactTextString(m) }
func (*BlockHeader) ProtoMessage()    {}
func (*BlockHeader) Descriptor() ([]byte, []int) {
    return fileDescriptor_8f954d82c0b891f6, []int{9}
}

func (m *BlockHeader) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_BlockHeader.Unmarshal(m, b)
}
func (m *BlockHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_BlockHeader.Marshal(b, m, deterministic)
}
func (m *BlockHeader) XXX_Merge(src proto.Message) {
    xxx_messageInfo_BlockHeader.Merge(m, src)
}
func (m *BlockHeader) XXX_Size() int {
    return xxx_messageInfo_BlockHeader.Size(m)
}
func (m *BlockHeader) XXX_DiscardUnknown() {
    xxx_messageInfo_BlockHeader.DiscardUnknown(m)
}

var xxx_messageInfo_BlockHeader proto.InternalMessageInfo

func (m *BlockHeader) GetNumber() uint64 {
    if m != nil {
        return m.Number
    }
    return 0
}

func (m *BlockHeader) GetPreviousHash() []byte {
    if m != nil {
        return m.PreviousHash
    }
    return nil
}

func (m *BlockHeader) GetDataHash() []byte {
    if m != nil {
        return m.DataHash
    }
    return nil
}

type BlockData struct {
    Data [][]byte `protobuf:"bytes,1,rep,name=data,proto3" json:"data,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

func (m *BlockData) Reset()         { *m = BlockData{} }
func (m *BlockData) String() string { return proto.CompactTextString(m) }
func (*BlockData) ProtoMessage()    {}
func (*BlockData) Descriptor() ([]byte, []int) {
    return fileDescriptor_8f954d82c0b891f6, []int{10}
}

func (m *BlockData) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_BlockData.Unmarshal(m, b)
}
func (m *BlockData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_BlockData.Marshal(b, m, deterministic)
}
func (m *BlockData) XXX_Merge(src proto.Message) {
    xxx_messageInfo_BlockData.Merge(m, src)
}
func (m *BlockData) XXX_Size() int {
    return xxx_messageInfo_BlockData.Size(m)
}
func (m *BlockData) XXX_DiscardUnknown() {
    xxx_messageInfo_BlockData.DiscardUnknown(m)
}

var xxx_messageInfo_BlockData proto.InternalMessageInfo

func (m *BlockData) GetData() [][]byte {
    if m != nil {
        return m.Data
    }
    return nil
}

type BlockMetadata struct {
    Metadata [][]byte `protobuf:"bytes,1,rep,name=metadata,proto3" json:"metadata,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

func (m *BlockMetadata) Reset()         { *m = BlockMetadata{} }
func (m *BlockMetadata) String() string { return proto.CompactTextString(m) }
func (*BlockMetadata) ProtoMessage()    {}
func (*BlockMetadata) Descriptor() ([]byte, []int) {
    return fileDescriptor_8f954d82c0b891f6, []int{11}
}

func (m *BlockMetadata) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_BlockMetadata.Unmarshal(m, b)
}
func (m *BlockMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_BlockMetadata.Marshal(b, m, deterministic)
}
func (m *BlockMetadata) XXX_Merge(src proto.Message) {
    xxx_messageInfo_BlockMetadata.Merge(m, src)
}
func (m *BlockMetadata) XXX_Size() int {
    return xxx_messageInfo_BlockMetadata.Size(m)
}
func (m *BlockMetadata) XXX_DiscardUnknown() {
    xxx_messageInfo_BlockMetadata.DiscardUnknown(m)
}

var xxx_messageInfo_BlockMetadata proto.InternalMessageInfo

func (m *BlockMetadata) GetMetadata() [][]byte {
    if m != nil {
        return m.Metadata
    }
    return nil
}

// OrdererBlockMetadata defines metadata that is set by the ordering service.
type OrdererBlockMetadata struct {
    LastConfig *LastConfig `protobuf:"bytes,1,opt,name=last_config,json=lastConfig,proto3" json:"last_config,omitempty"`
    ConsenterMetadata []byte `protobuf:"bytes,2,opt,name=consenter_metadata,json=consenterMetadata,proto3" json:"consenter_metadata,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

func (m *OrdererBlockMetadata) Reset()         { *m = OrdererBlockMetadata{} }
func (m *OrdererBlockMetadata) String() string { return proto.CompactTextString(m) }
func (*OrdererBlockMetadata) ProtoMessage()    {}
func (*OrdererBlockMetadata) Descriptor() ([]byte, []int) {
    return fileDescriptor_8f954d82c0b891f6, []int{12}
}

func (m *OrdererBlockMetadata) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_OrdererBlockMetadata.Unmarshal(m, b)
}
func (m *OrdererBlockMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_OrdererBlockMetadata.Marshal(b, m, deterministic)
}
func (m *OrdererBlockMetadata) XXX_Merge(src proto.Message) {
    xxx_messageInfo_OrdererBlockMetadata.Merge(m, src)
}
func (m *OrdererBlockMetadata) XXX_Size() int {
    return xxx_messageInfo_OrdererBlockMetadata.Size(m)
}
func (m *OrdererBlockMetadata) XXX_DiscardUnknown() {
    xxx_messageInfo_OrdererBlockMetadata.DiscardUnknown(m)
}

var xxx_messageInfo_OrdererBlockMetadata proto.InternalMessageInfo

func (m *OrdererBlockMetadata) GetLastConfig() *LastConfig {
    if m != nil {
        return m.LastConfig
    }
    return nil
}

func (m *OrdererBlockMetadata) GetConsenterMetadata() []byte {
    if m != nil {
        return m.ConsenterMetadata
    }
    return nil
}

func init() {
    proto.RegisterEnum("common.Status", Status_name, Status_value)
    proto.RegisterEnum("common.HeaderType", HeaderType_name, HeaderType_value)
    proto.RegisterEnum("common.BlockMetadataIndex", BlockMetadataIndex_name, BlockMetadataIndex_value)
    proto.RegisterType((*LastConfig)(nil), "common.LastConfig")
    proto.RegisterType((*Metadata)(nil), "common.Metadata")
    proto.RegisterType((*MetadataSignature)(nil), "common.MetadataSignature")
    proto.RegisterType((*Header)(nil), "common.Header")
    proto.RegisterType((*ChannelHeader)(nil), "common.ChannelHeader")
    proto.RegisterType((*SignatureHeader)(nil), "common.SignatureHeader")
    proto.RegisterType((*Payload)(nil), "common.Payload")
    proto.RegisterType((*Envelope)(nil), "common.Envelope")
    proto.RegisterType((*Block)(nil), "common.Block")
    proto.RegisterType((*BlockHeader)(nil), "common.BlockHeader")
    proto.RegisterType((*BlockData)(nil), "common.BlockData")
    proto.RegisterType((*BlockMetadata)(nil), "common.BlockMetadata")
    proto.RegisterType((*OrdererBlockMetadata)(nil), "common.OrdererBlockMetadata")
}

func init() { proto.RegisterFile("common/common.proto", fileDescriptor_8f954d82c0b891f6) }

var fileDescriptor_8f954d82c0b891f6 = []byte{
    // 1054 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x55, 0xcf, 0x6f, 0xe3, 0x44,
    0x14, 0xde, 0xc4, 0xf9, 0xf9, 0xbc, 0x69, 0x27, 0x93, 0x16, 0x4c, 0x61, 0xb5, 0x95, 0x61, 0x51,
    0xe9, 0xaa, 0xa9, 0xe8, 0x5e, 0xe0, 0xe8, 0xd8, 0xd3, 0xd6, 0x6a, 0x62, 0x87, 0xb1, 0x53, 0xc4,
    0x82, 0x64, 0xb9, 0xc9, 0x34, 0x89, 0x48, 0xec, 0xc8, 0x9e, 0x54, 0x2d, 0x57, 0xee, 0x08, 0x09,
    0xae, 0xfc, 0x2f, 0x1c, 0xf9, 0x5b, 0x38, 0x83, 0xb8, 0x22, 0x7b, 0x6c, 0x37, 0x29, 0x2b, 0x71,
    0x8a, 0xdf, 0x37, 0x9f, 0xdf, 0xfb, 0xde, 0xfb, 0x5e, 0xc6, 0xd0, 0x19, 0x87, 0xcb, 0x65, 0x18,
    0x9c, 0x8a, 0x9f, 0xee, 0x2a, 0x0a, 0x79, 0x88, 0x6b, 0x22, 0x3a, 0x78, 0x39, 0x0d, 0xc3, 0xe9,
    0x82, 0x9d, 0xa6, 0xe8, 0xcd, 0xfa, 0xf6, 0x94, 0xcf, 0x97, 0x2c, 0xe6, 0xfe, 0x72, 0x25, 0x88,
    0xaa, 0x0a, 0xd0, 0xf7, 0x63, 0xae, 0x87, 0xc1, 0xed, 0x7c, 0x8a, 0xf7, 0xa0, 0x3a, 0x0f, 0x26,
    0xec, 0x5e, 0x29, 0x1d, 0x96, 0x8e, 0x2a, 0x54, 0x04, 0xea, 0xb7, 0xd0, 0x18, 0x30, 0xee, 0x4f,
    0x7c, 0xee, 0x27, 0x8c, 0x3b, 0x7f, 0xb1, 0x66, 0x29, 0xe3, 0x39, 0x15, 0x01, 0xfe, 0x12, 0x20,
    0x9e, 0x4f, 0x03, 0x9f, 0xaf, 0x23, 0x16, 0x2b, 0xe5, 0x43, 0xe9, 0x48, 0x3e, 0xfb, 0xa0, 0x9b,
    0x29, 0xca, 0xdf, 0x75, 0x72, 0x06, 0xdd, 0x20, 0xab, 0xdf, 0x41, 0xfb, 0x3f, 0x04, 0xfc, 0x19,
    0xa0, 0x82, 0xe2, 0xcd, 0x98, 0x3f, 0x61, 0x51, 0x56, 0x70, 0xb7, 0xc0, 0x2f, 0x53, 0x18, 0x7f,
    0x04, 0xcd, 0x02, 0x52, 0xca, 0x29, 0xe7, 0x11, 0x50, 0xdf, 0x42, 0x2d, 0xe3, 0xbd, 0x82, 0x9d,
    0xf1, 0xcc, 0x0f, 0x02, 0xb6, 0xd8, 0x4e, 0xd8, 0xca, 0xd0, 0x8c, 0xf6, 0xae, 0xca, 0xe5, 0x77,
    0x56, 0x56, 0x7f, 0x2c, 0x43, 0x4b, 0xdf, 0x7a, 0x19, 0x43, 0x85, 0x3f, 0xac, 0xc4, 0x6c, 0xaa,
    0x34, 0x7d, 0xc6, 0x0a, 0xd4, 0xef, 0x58, 0x14, 0xcf, 0xc3, 0x20, 0xcd, 0x53, 0xa5, 0x79, 0x88,
    0xbf, 0x80, 0x66, 0xe1, 0x86, 0x22, 0x1d, 0x96, 0x8e, 0xe4, 0xb3, 0x83, 0xae, 0xf0, 0xab, 0x9b,
    0xfb, 0xd5, 0x75, 0x73, 0x06, 0x7d, 0x24, 0xe3, 0x17, 0x00, 0x79, 0x2f, 0xf3, 0x89, 0x52, 0x39,
    0x2c, 0x1d, 0x35, 0x69, 0x33, 0x43, 0xcc, 0x09, 0xee, 0x40, 0x95, 0xdf, 0x27, 0x27, 0xd5, 0xf4,
    0xa4, 0xc2, 0xef, 0xcd, 0x49, 0x62, 0x1c, 0x5b, 0x85, 0xe3, 0x99, 0x52, 0x13, 0xd6, 0xa6, 0x41,
    0x32, 0x3d, 0x76, 0xcf, 0x59, 0x90, 0xea, 0xab, 0x8b, 0xe9, 0x15, 0x00, 0x56, 0xa1, 0xc5, 0x17,
    0xb1, 0x37, 0x66, 0x11, 0xf7, 0x66, 0x7e, 0x3c, 0x53, 0x1a, 0x29, 0x43, 0xe6, 0x8b, 0x58, 0x67,
    0x11, 0xbf, 0xf4, 0xe3, 0x99, 0xaa, 0xc1, 0xae, 0xf3, 0xc4, 0x12, 0x05, 0xea, 0xe3, 0x88, 0xf9,
    0x3c, 0xcc, 0x67, 0x9c, 0x87, 0x89, 0x88, 0x20, 0x0c, 0xc6, 0xb9, 0x51, 0x22, 0x50, 0x09, 0xd4,
    0x87, 0xfe, 0xc3, 0x22, 0xf4, 0x27, 0xf8, 0x53, 0xa8, 0x6d, 0xb8, 0x23, 0x9f, 0xed, 0xe4, 0x4b,
    0x24, 0x52, 0xd3, 0xec, 0x34, 0x99, 0x74, 0xb2, 0x31, 0x59, 0x9e, 0xf4, 0x59, 0xed, 0x41, 0x83,
    0x04, 0x77, 0x6c, 0x11, 0x8a, 0xa9, 0xaf, 0x44, 0xca, 0x5c, 0x42, 0x16, 0xfe, 0xcf, 0xbe, 0xfc,
    0x54, 0x82, 0x6a, 0x6f, 0x11, 0x8e, 0xbf, 0xc7, 0xaf, 0x9f, 0x28, 0xe9, 0xe4, 0x4a, 0xd2, 0xe3,
    0x27, 0x72, 0x5e, 0x6d, 0xc8, 0x91, 0xcf, 0xda, 0x5b, 0x54, 0xc3, 0xe7, 0xbe, 0x50, 0x88, 0x3f,
    0x87, 0xc6, 0x32, 0xdb, 0xf5, 0xcc, 0xf0, 0xfd, 0x2d, 0x6a, 0xfe, 0x47, 0xa0, 0x05, 0x4d, 0x9d,
    0x82, 0xbc, 0x51, 0x10, 0xbf, 0x07, 0xb5, 0x60, 0xbd, 0xbc, 0xc9, 0x54, 0x55, 0x68, 0x16, 0xe1,
    0x8f, 0xa1, 0xb5, 0x8a, 0xd8, 0xdd, 0x3c, 0x5c, 0xc7, 0xc2, 0x29, 0xd1, 0xd9, 0xf3, 0x1c, 0x4c,
    0xac, 0xc2, 0x1f, 0x42, 0x33, 0xc9, 0x29, 0x08, 0x52, 0x4a, 0x68, 0x24, 0x40, 0xea, 0xe3, 0x4b,
    0x68, 0x16, 0x72, 0x8b, 0xf1, 0x96, 0x0e, 0xa5, 0x62, 0xbc, 0xaf, 0xa1, 0xb5, 0x25, 0x12, 0x1f,
    0x6c, 0x74, 0x23, 0x88, 0x8f, 0xb2, 0x7f, 0x80, 0x3d, 0x3b, 0x9a, 0xb0, 0x88, 0x45, 0xdb, 0xef,
    0xbc, 0x01, 0x79, 0xe1, 0xc7, 0xdc, 0x1b, 0xa7, 0xf7, 0x4d, 0x36, 0x5a, 0x9c, 0x0f, 0xe1, 0xf1,
    0x26, 0xa2, 0xb0, 0x78, 0xbc, 0x95, 0x4e, 0x00, 0x8f, 0xc3, 0x20, 0x66, 0x01, 0x67, 0x91, 0x57,
    0x94, 0x14, 0x1d, 0xb6, 0x8b, 0x93, 0xbc, 0xc6, 0xf1, 0xef, 0x25, 0xa8, 0x39, 0xdc, 0xe7, 0xeb,
    0x18, 0xcb, 0x50, 0x1f, 0x59, 0x57, 0x96, 0xfd, 0xb5, 0x85, 0x9e, 0xe1, 0xe7, 0x50, 0x77, 0x46,
    0xba, 0x4e, 0x1c, 0x07, 0xfd, 0x51, 0xc2, 0x08, 0xe4, 0x9e, 0x66, 0x78, 0x94, 0x7c, 0x35, 0x22,
    0x8e, 0x8b, 0x7e, 0x96, 0xf0, 0x0e, 0x34, 0xcf, 0x6d, 0xda, 0x33, 0x0d, 0x83, 0x58, 0xe8, 0x97,
    0x34, 0xb6, 0x6c, 0xd7, 0x3b, 0xb7, 0x47, 0x96, 0x81, 0x7e, 0x95, 0xf0, 0x0b, 0x50, 0x32, 0xb6,
    0x47, 0x2c, 0xd7, 0x74, 0xbf, 0xf1, 0x5c, 0xdb, 0xf6, 0xfa, 0x1a, 0xbd, 0x20, 0xe8, 0x37, 0x09,
    0x1f, 0xc0, 0xbe, 0x69, 0xb9, 0x84, 0x5a, 0x5a, 0xdf, 0x73, 0x08, 0xbd, 0x26, 0xd4, 0x23, 0x94,
    0xda, 0x14, 0xfd, 0x25, 0xe1, 0x3d, 0xd8, 0x4d, 0x52, 0x99, 0x83, 0x61, 0x9f, 0x0c, 0x88, 0xe5,
    0x12, 0x03, 0xfd, 0x2d, 0x61, 0x05, 0x3a, 0x09, 0xd1, 0xd4, 0x89, 0x37, 0xb2, 0xb4, 0x6b, 0xcd,
    0xec, 0x6b, 0xbd, 0x3e, 0x41, 0xff, 0x48, 0xc7, 0x7f, 0x96, 0x00, 0x84, 0xe3, 0x6e, 0x72, 0x87,
    0xc8, 0x50, 0x1f, 0x10, 0xc7, 0xd1, 0x2e, 0x08, 0x7a, 0x86, 0x01, 0x6a, 0xba, 0x6d, 0x9d, 0x9b,
    0x17, 0xa8, 0x84, 0xdb, 0xd0, 0x12, 0xcf, 0xde, 0x68, 0x68, 0x68, 0x2e, 0x41, 0x65, 0xac, 0xc0,
    0x1e, 0xb1, 0x0c, 0x9b, 0x3a, 0x84, 0x7a, 0x2e, 0xd5, 0x2c, 0x47, 0xd3, 0x5d, 0xd3, 0xb6, 0x90,
    0x84, 0xdf, 0x87, 0x8e, 0x4d, 0x0d, 0x42, 0x9f, 0x1c, 0x54, 0xf0, 0x3e, 0xb4, 0x0d, 0xd2, 0x37,
    0x13, 0xc5, 0x0e, 0x21, 0x57, 0x9e, 0x69, 0x9d, 0xdb, 0xa8, 0x9a, 0xc0, 0xfa, 0xa5, 0x66, 0x5a,
    0xba, 0x6d, 0x10, 0x6f, 0xa8, 0xe9, 0x57, 0x49, 0xfd, 0x9a, 0x5a, 0x69, 0xd4, 0x51, 0x5d, 0xad,
    0x34, 0x1a, 0xa8, 0xa1, 0x56, 0x1a, 0x4d, 0xd4, 0x3c, 0xde, 0x1b, 0x12, 0x42, 0x3d, 0x4a, 0x1c,
    0x7b, 0x44, 0x93, 0x5e, 0x52, 0x29, 0x19, 0xaa, 0x19, 0x03, 0xd3, 0xf2, 0xec, 0x21, 0xa1, 0x5a,
    0x52, 0xed, 0xb8, 0xed, 0xda, 0x57, 0xc4, 0xda, 0x14, 0x70, 0xcc, 0x01, 0x6f, 0x2d, 0x89, 0x99,
    0x7c, 0x74, 0xf0, 0x0e, 0x80, 0x63, 0x5e, 0x58, 0x9a, 0x3b, 0xa2, 0xc4, 0x41, 0xcf, 0x70, 0x07,
    0xe4, 0xbe, 0xe6, 0xb8, 0x5e, 0xde, 0xfb, 0x41, 0xb9, 0x51, 0x4a, 0x5a, 0xda, 0xc8, 0xe4, 0x78,
    0xe7, 0x66, 0xdf, 0x25, 0x14, 0x95, 0xf1, 0x2e, 0xd4, 0xb3, 0x5e, 0x91, 0x94, 0x32, 0x77, 0x41,
    0xd6, 0xed, 0xc1, 0xc0, 0x74, 0xbd, 0x4b, 0xcd, 0xb9, 0x44, 0x95, 0xde, 0x35, 0x7c, 0x12, 0x46,
    0xd3, 0xee, 0xec, 0x61, 0xc5, 0xa2, 0x05, 0x9b, 0x4c, 0x59, 0xd4, 0xbd, 0xf5, 0x6f, 0xa2, 0xf9,
    0x58, 0xdc, 0xbd, 0x71, 0xb6, 0x93, 0x6f, 0xbb, 0xd3, 0x39, 0x9f, 0xad, 0x6f, 0x92, 0xf0, 0x74,
    0x83, 0x7c, 0x2a, 0xc8, 0x27, 0x82, 0x7c, 0x32, 0x0d, 0xb3, 0xef, 0xef, 0x4d, 0x2d, 0x45, 0xde,
    0xfc, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x6b, 0x17, 0x38, 0x20, 0x97, 0x07, 0x00, 0x00,
}
548 chaincode/vendor/github.com/hyperledger/fabric-protos-go/common/configtx.pb.go (generated, vendored)
@ -0,0 +1,548 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: common/configtx.proto

package common

import (
    fmt "fmt"
    proto "github.com/golang/protobuf/proto"
    math "math"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package

// ConfigEnvelope is designed to contain _all_ configuration for a chain with no dependency
// on previous configuration transactions.
//
// It is generated with the following scheme:
// 1. Retrieve the existing configuration
// 2. Note the config properties (ConfigValue, ConfigPolicy, ConfigGroup) to be modified
// 3. Add any intermediate ConfigGroups to the ConfigUpdate.read_set (sparsely)
// 4. Add any additional desired dependencies to ConfigUpdate.read_set (sparsely)
// 5. Modify the config properties, incrementing each version by 1, set them in the ConfigUpdate.write_set
//    Note: any element not modified but specified should already be in the read_set, so may be specified sparsely
// 6. Create ConfigUpdate message and marshal it into ConfigUpdateEnvelope.update and encode the required signatures
//    a) Each signature is of type ConfigSignature
//    b) The ConfigSignature signature is over the concatenation of signature_header and the ConfigUpdate bytes (which includes a ChainHeader)
// 5. Submit new Config for ordering in Envelope signed by submitter
//    a) The Envelope Payload has data set to the marshaled ConfigEnvelope
//    b) The Envelope Payload has a header of type Header.Type.CONFIG_UPDATE
//
// The configuration manager will verify:
// 1. All items in the read_set exist at the read versions
// 2. All items in the write_set at a different version than, or not in, the read_set have been appropriately signed according to their mod_policy
// 3. The new configuration satisfies the ConfigSchema
type ConfigEnvelope struct {
    Config *Config `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
    LastUpdate *Envelope `protobuf:"bytes,2,opt,name=last_update,json=lastUpdate,proto3" json:"last_update,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

func (m *ConfigEnvelope) Reset()         { *m = ConfigEnvelope{} }
func (m *ConfigEnvelope) String() string { return proto.CompactTextString(m) }
func (*ConfigEnvelope) ProtoMessage()    {}
func (*ConfigEnvelope) Descriptor() ([]byte, []int) {
    return fileDescriptor_5190bbf196fa7499, []int{0}
}

func (m *ConfigEnvelope) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_ConfigEnvelope.Unmarshal(m, b)
}
func (m *ConfigEnvelope) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_ConfigEnvelope.Marshal(b, m, deterministic)
}
func (m *ConfigEnvelope) XXX_Merge(src proto.Message) {
    xxx_messageInfo_ConfigEnvelope.Merge(m, src)
}
func (m *ConfigEnvelope) XXX_Size() int {
    return xxx_messageInfo_ConfigEnvelope.Size(m)
}
func (m *ConfigEnvelope) XXX_DiscardUnknown() {
    xxx_messageInfo_ConfigEnvelope.DiscardUnknown(m)
}

var xxx_messageInfo_ConfigEnvelope proto.InternalMessageInfo

func (m *ConfigEnvelope) GetConfig() *Config {
    if m != nil {
        return m.Config
    }
    return nil
}

func (m *ConfigEnvelope) GetLastUpdate() *Envelope {
    if m != nil {
        return m.LastUpdate
    }
    return nil
}

// Config represents the config for a particular channel
type Config struct {
    Sequence uint64 `protobuf:"varint,1,opt,name=sequence,proto3" json:"sequence,omitempty"`
    ChannelGroup *ConfigGroup `protobuf:"bytes,2,opt,name=channel_group,json=channelGroup,proto3" json:"channel_group,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

func (m *Config) Reset()         { *m = Config{} }
func (m *Config) String() string { return proto.CompactTextString(m) }
func (*Config) ProtoMessage()    {}
func (*Config) Descriptor() ([]byte, []int) {
    return fileDescriptor_5190bbf196fa7499, []int{1}
}

func (m *Config) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_Config.Unmarshal(m, b)
}
func (m *Config) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_Config.Marshal(b, m, deterministic)
}
func (m *Config) XXX_Merge(src proto.Message) {
    xxx_messageInfo_Config.Merge(m, src)
}
func (m *Config) XXX_Size() int {
    return xxx_messageInfo_Config.Size(m)
}
func (m *Config) XXX_DiscardUnknown() {
    xxx_messageInfo_Config.DiscardUnknown(m)
}

var xxx_messageInfo_Config proto.InternalMessageInfo

func (m *Config) GetSequence() uint64 {
    if m != nil {
        return m.Sequence
    }
    return 0
}

func (m *Config) GetChannelGroup() *ConfigGroup {
    if m != nil {
        return m.ChannelGroup
    }
    return nil
}

type ConfigUpdateEnvelope struct {
    ConfigUpdate []byte `protobuf:"bytes,1,opt,name=config_update,json=configUpdate,proto3" json:"config_update,omitempty"`
    Signatures []*ConfigSignature `protobuf:"bytes,2,rep,name=signatures,proto3" json:"signatures,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

func (m *ConfigUpdateEnvelope) Reset()         { *m = ConfigUpdateEnvelope{} }
func (m *ConfigUpdateEnvelope) String() string { return proto.CompactTextString(m) }
func (*ConfigUpdateEnvelope) ProtoMessage()    {}
func (*ConfigUpdateEnvelope) Descriptor() ([]byte, []int) {
    return fileDescriptor_5190bbf196fa7499, []int{2}
}

func (m *ConfigUpdateEnvelope) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_ConfigUpdateEnvelope.Unmarshal(m, b)
}
func (m *ConfigUpdateEnvelope) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_ConfigUpdateEnvelope.Marshal(b, m, deterministic)
}
func (m *ConfigUpdateEnvelope) XXX_Merge(src proto.Message) {
    xxx_messageInfo_ConfigUpdateEnvelope.Merge(m, src)
}
func (m *ConfigUpdateEnvelope) XXX_Size() int {
    return xxx_messageInfo_ConfigUpdateEnvelope.Size(m)
}
func (m *ConfigUpdateEnvelope) XXX_DiscardUnknown() {
    xxx_messageInfo_ConfigUpdateEnvelope.DiscardUnknown(m)
}

var xxx_messageInfo_ConfigUpdateEnvelope proto.InternalMessageInfo

func (m *ConfigUpdateEnvelope) GetConfigUpdate() []byte {
    if m != nil {
        return m.ConfigUpdate
    }
    return nil
}

func (m *ConfigUpdateEnvelope) GetSignatures() []*ConfigSignature {
    if m != nil {
        return m.Signatures
    }
    return nil
}

// ConfigUpdate is used to submit a subset of config and to have the orderer apply to Config
// it is always submitted inside a ConfigUpdateEnvelope which allows the addition of signatures
// resulting in a new total configuration. The update is applied as follows:
// 1. The versions from all of the elements in the read_set is verified against the versions in the existing config.
//    If there is a mismatch in the read versions, then the config update fails and is rejected.
// 2. Any elements in the write_set with the same version as the read_set are ignored.
// 3. The corresponding mod_policy for every remaining element in the write_set is collected.
// 4. Each policy is checked against the signatures from the ConfigUpdateEnvelope, any failing to verify are rejected
// 5. The write_set is applied to the Config and the ConfigGroupSchema verifies that the updates were legal
type ConfigUpdate struct {
    ChannelId string `protobuf:"bytes,1,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"`
    ReadSet *ConfigGroup `protobuf:"bytes,2,opt,name=read_set,json=readSet,proto3" json:"read_set,omitempty"`
    WriteSet *ConfigGroup `protobuf:"bytes,3,opt,name=write_set,json=writeSet,proto3" json:"write_set,omitempty"`
    IsolatedData map[string][]byte `protobuf:"bytes,5,rep,name=isolated_data,json=isolatedData,proto3" json:"isolated_data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

func (m *ConfigUpdate) Reset()         { *m = ConfigUpdate{} }
func (m *ConfigUpdate) String() string { return proto.CompactTextString(m) }
func (*ConfigUpdate) ProtoMessage()    {}
func (*ConfigUpdate) Descriptor() ([]byte, []int) {
    return fileDescriptor_5190bbf196fa7499, []int{3}
}

func (m *ConfigUpdate) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_ConfigUpdate.Unmarshal(m, b)
}
func (m *ConfigUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_ConfigUpdate.Marshal(b, m, deterministic)
}
func (m *ConfigUpdate) XXX_Merge(src proto.Message) {
    xxx_messageInfo_ConfigUpdate.Merge(m, src)
}
func (m *ConfigUpdate) XXX_Size() int {
    return xxx_messageInfo_ConfigUpdate.Size(m)
}
func (m *ConfigUpdate) XXX_DiscardUnknown() {
    xxx_messageInfo_ConfigUpdate.DiscardUnknown(m)
}

var xxx_messageInfo_ConfigUpdate proto.InternalMessageInfo

func (m *ConfigUpdate) GetChannelId() string {
    if m != nil {
        return m.ChannelId
    }
    return ""
}

func (m *ConfigUpdate) GetReadSet() *ConfigGroup {
    if m != nil {
        return m.ReadSet
    }
    return nil
}

func (m *ConfigUpdate) GetWriteSet() *ConfigGroup {
    if m != nil {
        return m.WriteSet
    }
    return nil
}

func (m *ConfigUpdate) GetIsolatedData() map[string][]byte {
    if m != nil {
        return m.IsolatedData
    }
    return nil
}

// ConfigGroup is the hierarchical data structure for holding config
type ConfigGroup struct {
    Version uint64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"`
    Groups map[string]*ConfigGroup `protobuf:"bytes,2,rep,name=groups,proto3" json:"groups,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
    Values map[string]*ConfigValue `protobuf:"bytes,3,rep,name=values,proto3" json:"values,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
    Policies map[string]*ConfigPolicy `protobuf:"bytes,4,rep,name=policies,proto3" json:"policies,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
    ModPolicy string `protobuf:"bytes,5,opt,name=mod_policy,json=modPolicy,proto3" json:"mod_policy,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

func (m *ConfigGroup) Reset()         { *m = ConfigGroup{} }
func (m *ConfigGroup) String() string { return proto.CompactTextString(m) }
func (*ConfigGroup) ProtoMessage()    {}
func (*ConfigGroup) Descriptor() ([]byte, []int) {
    return fileDescriptor_5190bbf196fa7499, []int{4}
}

func (m *ConfigGroup) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_ConfigGroup.Unmarshal(m, b)
}
func (m *ConfigGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_ConfigGroup.Marshal(b, m, deterministic)
}
func (m *ConfigGroup) XXX_Merge(src proto.Message) {
    xxx_messageInfo_ConfigGroup.Merge(m, src)
}
func (m *ConfigGroup) XXX_Size() int {
    return xxx_messageInfo_ConfigGroup.Size(m)
}
func (m *ConfigGroup) XXX_DiscardUnknown() {
    xxx_messageInfo_ConfigGroup.DiscardUnknown(m)
}

var xxx_messageInfo_ConfigGroup proto.InternalMessageInfo

func (m *ConfigGroup) GetVersion() uint64 {
    if m != nil {
        return m.Version
    }
    return 0
}

func (m *ConfigGroup) GetGroups() map[string]*ConfigGroup {
    if m != nil {
        return m.Groups
    }
    return nil
}

func (m *ConfigGroup) GetValues() map[string]*ConfigValue {
    if m != nil {
        return m.Values
    }
    return nil
}

func (m *ConfigGroup) GetPolicies() map[string]*ConfigPolicy {
    if m != nil {
        return m.Policies
    }
    return nil
}

func (m *ConfigGroup) GetModPolicy() string {
    if m != nil {
        return m.ModPolicy
    }
    return ""
}

// ConfigValue represents an individual piece of config data
type ConfigValue struct {
    Version uint64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"`
    Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
    ModPolicy string `protobuf:"bytes,3,opt,name=mod_policy,json=modPolicy,proto3" json:"mod_policy,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

func (m *ConfigValue) Reset()         { *m = ConfigValue{} }
func (m *ConfigValue) String() string { return proto.CompactTextString(m) }
func (*ConfigValue) ProtoMessage()    {}
func (*ConfigValue) Descriptor() ([]byte, []int) {
    return fileDescriptor_5190bbf196fa7499, []int{5}
}

func (m *ConfigValue) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_ConfigValue.Unmarshal(m, b)
}
func (m *ConfigValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_ConfigValue.Marshal(b, m, deterministic)
}
func (m *ConfigValue) XXX_Merge(src proto.Message) {
    xxx_messageInfo_ConfigValue.Merge(m, src)
}
func (m *ConfigValue) XXX_Size() int {
    return xxx_messageInfo_ConfigValue.Size(m)
}
func (m *ConfigValue) XXX_DiscardUnknown() {
    xxx_messageInfo_ConfigValue.DiscardUnknown(m)
}

var xxx_messageInfo_ConfigValue proto.InternalMessageInfo

func (m *ConfigValue) GetVersion() uint64 {
    if m != nil {
        return m.Version
    }
    return 0
}

func (m *ConfigValue) GetValue() []byte {
    if m != nil {
        return m.Value
    }
    return nil
}

func (m *ConfigValue) GetModPolicy() string {
    if m != nil {
        return m.ModPolicy
    }
    return ""
}

type ConfigPolicy struct {
    Version uint64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"`
    Policy *Policy `protobuf:"bytes,2,opt,name=policy,proto3" json:"policy,omitempty"`
    ModPolicy string `protobuf:"bytes,3,opt,name=mod_policy,json=modPolicy,proto3" json:"mod_policy,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

func (m *ConfigPolicy) Reset()         { *m = ConfigPolicy{} }
func (m *ConfigPolicy) String() string { return proto.CompactTextString(m) }
func (*ConfigPolicy) ProtoMessage()    {}
func (*ConfigPolicy) Descriptor() ([]byte, []int) {
    return fileDescriptor_5190bbf196fa7499, []int{6}
}

func (m *ConfigPolicy) XXX_Unmarshal(b []byte) error {
    return xxx_messageInfo_ConfigPolicy.Unmarshal(m, b)
}
func (m *ConfigPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    return xxx_messageInfo_ConfigPolicy.Marshal(b, m, deterministic)
}
func (m *ConfigPolicy) XXX_Merge(src proto.Message) {
    xxx_messageInfo_ConfigPolicy.Merge(m, src)
}
func (m *ConfigPolicy) XXX_Size() int {
    return xxx_messageInfo_ConfigPolicy.Size(m)
}
func (m *ConfigPolicy) XXX_DiscardUnknown() {
    xxx_messageInfo_ConfigPolicy.DiscardUnknown(m)
}

var xxx_messageInfo_ConfigPolicy proto.InternalMessageInfo

func (m *ConfigPolicy) GetVersion() uint64 {
    if m != nil {
        return m.Version
    }
    return 0
}

func (m *ConfigPolicy) GetPolicy() *Policy {
    if m != nil {
        return m.Policy
    }
    return nil
}

func (m *ConfigPolicy) GetModPolicy() string {
    if m != nil {
        return m.ModPolicy
    }
    return ""
}

type ConfigSignature struct {
    SignatureHeader []byte `protobuf:"bytes,1,opt,name=signature_header,json=signatureHeader,proto3" json:"signature_header,omitempty"`
    Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

func (m *ConfigSignature) Reset()         { *m = ConfigSignature{} }
func (m *ConfigSignature) String() string { return proto.CompactTextString(m) }
func (*ConfigSignature) ProtoMessage()    {}
|
||||
func (*ConfigSignature) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_5190bbf196fa7499, []int{7}
|
||||
}
|
||||
|
||||
func (m *ConfigSignature) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ConfigSignature.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ConfigSignature) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ConfigSignature.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ConfigSignature) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ConfigSignature.Merge(m, src)
|
||||
}
|
||||
func (m *ConfigSignature) XXX_Size() int {
|
||||
return xxx_messageInfo_ConfigSignature.Size(m)
|
||||
}
|
||||
func (m *ConfigSignature) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ConfigSignature.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ConfigSignature proto.InternalMessageInfo
|
||||
|
||||
func (m *ConfigSignature) GetSignatureHeader() []byte {
|
||||
if m != nil {
|
||||
return m.SignatureHeader
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ConfigSignature) GetSignature() []byte {
|
||||
if m != nil {
|
||||
return m.Signature
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*ConfigEnvelope)(nil), "common.ConfigEnvelope")
|
||||
proto.RegisterType((*Config)(nil), "common.Config")
|
||||
proto.RegisterType((*ConfigUpdateEnvelope)(nil), "common.ConfigUpdateEnvelope")
|
||||
proto.RegisterType((*ConfigUpdate)(nil), "common.ConfigUpdate")
|
||||
proto.RegisterMapType((map[string][]byte)(nil), "common.ConfigUpdate.IsolatedDataEntry")
|
||||
proto.RegisterType((*ConfigGroup)(nil), "common.ConfigGroup")
|
||||
proto.RegisterMapType((map[string]*ConfigGroup)(nil), "common.ConfigGroup.GroupsEntry")
|
||||
proto.RegisterMapType((map[string]*ConfigPolicy)(nil), "common.ConfigGroup.PoliciesEntry")
|
||||
proto.RegisterMapType((map[string]*ConfigValue)(nil), "common.ConfigGroup.ValuesEntry")
|
||||
proto.RegisterType((*ConfigValue)(nil), "common.ConfigValue")
|
||||
proto.RegisterType((*ConfigPolicy)(nil), "common.ConfigPolicy")
|
||||
proto.RegisterType((*ConfigSignature)(nil), "common.ConfigSignature")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("common/configtx.proto", fileDescriptor_5190bbf196fa7499) }
|
||||
|
||||
var fileDescriptor_5190bbf196fa7499 = []byte{
|
||||
// 645 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0x61, 0x4f, 0xd4, 0x4c,
|
||||
0x10, 0x0e, 0xd7, 0x5e, 0xe9, 0xcd, 0xf5, 0xe0, 0xde, 0x85, 0x37, 0x36, 0x17, 0x8d, 0x58, 0x0d,
|
||||
0x01, 0x13, 0x8a, 0xe2, 0x07, 0x88, 0x89, 0x31, 0x51, 0x89, 0x82, 0x89, 0xd1, 0x12, 0xf9, 0x40,
|
||||
0x4c, 0x9a, 0xa5, 0x5d, 0x7a, 0x95, 0x5e, 0xb7, 0x6e, 0xb7, 0x68, 0x7f, 0x92, 0x7f, 0xcd, 0x5f,
|
||||
0x61, 0xba, 0xbb, 0x2d, 0x2d, 0x1e, 0x67, 0xfc, 0x02, 0xcc, 0xcc, 0xf3, 0x3c, 0x33, 0xcf, 0xce,
|
||||
0x76, 0x81, 0xff, 0x03, 0x3a, 0x9b, 0xd1, 0x74, 0x37, 0xa0, 0xe9, 0x45, 0x1c, 0xf1, 0x1f, 0x6e,
|
||||
0xc6, 0x28, 0xa7, 0xc8, 0x90, 0xe9, 0xc9, 0x5a, 0x53, 0xae, 0x7e, 0xc9, 0xe2, 0xa4, 0xe6, 0x64,
|
||||
0x34, 0x89, 0x83, 0x98, 0xe4, 0x32, 0xed, 0x5c, 0xc2, 0xca, 0x6b, 0xa1, 0x72, 0x98, 0x5e, 0x91,
|
||||
0x84, 0x66, 0x04, 0x6d, 0x82, 0x21, 0x75, 0xed, 0xa5, 0x8d, 0xa5, 0xad, 0xe1, 0xde, 0x8a, 0xab,
|
||||
0x74, 0x24, 0xce, 0x53, 0x55, 0xf4, 0x14, 0x86, 0x09, 0xce, 0xb9, 0x5f, 0x64, 0x21, 0xe6, 0xc4,
|
||||
0xee, 0x09, 0xf0, 0xb8, 0x06, 0xd7, 0x72, 0x1e, 0x54, 0xa0, 0xcf, 0x02, 0xe3, 0x7c, 0x05, 0x43,
|
||||
0x8a, 0xa0, 0x09, 0x98, 0x39, 0xf9, 0x56, 0x90, 0x34, 0x20, 0xa2, 0x8d, 0xee, 0x35, 0x31, 0x3a,
|
||||
0x80, 0x51, 0x30, 0xc5, 0x69, 0x4a, 0x12, 0x3f, 0x62, 0xb4, 0xc8, 0x94, 0xf4, 0x5a, 0x77, 0x8e,
|
||||
0xb7, 0x55, 0xc9, 0xb3, 0x14, 0x52, 0x44, 0xc7, 0xba, 0xa9, 0x8d, 0x75, 0x4f, 0xe7, 0x65, 0x46,
|
||||
0x1c, 0x0e, 0xeb, 0x12, 0x28, 0x7b, 0x37, 0xf6, 0x1e, 0xc2, 0x48, 0x1a, 0xa8, 0x07, 0xaf, 0xda,
|
||||
0x5b, 0x9e, 0x15, 0xb4, 0xc0, 0x68, 0x1f, 0x20, 0x8f, 0xa3, 0x14, 0xf3, 0x82, 0x91, 0xdc, 0xee,
|
||||
0x6d, 0x68, 0x5b, 0xc3, 0xbd, 0x3b, 0xdd, 0xfe, 0x27, 0x75, 0xdd, 0x6b, 0x41, 0x9d, 0x9f, 0x3d,
|
||||
0xb0, 0xda, 0x6d, 0xd1, 0x3d, 0x80, 0xda, 0x4c, 0x1c, 0x8a, 0x5e, 0x03, 0x6f, 0xa0, 0x32, 0x47,
|
||||
0x21, 0x72, 0xc1, 0x64, 0x04, 0x87, 0x7e, 0x4e, 0xf8, 0x22, 0x9b, 0xcb, 0x15, 0xe8, 0x84, 0x70,
|
||||
0xf4, 0x04, 0x06, 0xdf, 0x59, 0xcc, 0x89, 0x20, 0x68, 0xb7, 0x13, 0x4c, 0x81, 0xaa, 0x18, 0xef,
|
||||
0x61, 0x14, 0xe7, 0x34, 0xc1, 0x9c, 0x84, 0x7e, 0x88, 0x39, 0xb6, 0xfb, 0xc2, 0xcd, 0x66, 0x97,
|
||||
0x25, 0xa7, 0x75, 0x8f, 0x14, 0xf2, 0x0d, 0xe6, 0xf8, 0x30, 0xe5, 0xac, 0xf4, 0xac, 0xb8, 0x95,
|
||||
0x9a, 0xbc, 0x84, 0xff, 0xfe, 0x80, 0xa0, 0x31, 0x68, 0x97, 0xa4, 0x54, 0xde, 0xaa, 0x3f, 0xd1,
|
||||
0x3a, 0xf4, 0xaf, 0x70, 0x52, 0xc8, 0x4b, 0x61, 0x79, 0x32, 0x78, 0xde, 0x3b, 0x58, 0x3a, 0xd6,
|
||||
0x4d, 0x7d, 0xdc, 0x57, 0x1b, 0xfa, 0xa5, 0xc1, 0xb0, 0x35, 0x33, 0xb2, 0x61, 0xf9, 0x8a, 0xb0,
|
||||
0x3c, 0xa6, 0xa9, 0xba, 0x12, 0x75, 0x88, 0xf6, 0xc1, 0x10, 0x37, 0xa1, 0x5e, 0xc5, 0xfd, 0x39,
|
||||
0x96, 0x5d, 0xf1, 0x33, 0x97, 0x53, 0x2b, 0x78, 0x45, 0x14, 0xbd, 0x73, 0x5b, 0xbb, 0x9d, 0x78,
|
||||
0x2a, 0x10, 0x8a, 0x28, 0xe1, 0xe8, 0x05, 0x98, 0xf5, 0x87, 0x62, 0xeb, 0x82, 0xfa, 0x60, 0x1e,
|
||||
0xf5, 0xa3, 0xc2, 0x48, 0x72, 0x43, 0xa9, 0xb6, 0x3e, 0xa3, 0xa1, 0x2f, 0xe2, 0xd2, 0xee, 0xcb,
|
||||
0xad, 0xcf, 0x68, 0x28, 0xf0, 0xe5, 0xe4, 0x03, 0x0c, 0x5b, 0xd3, 0xce, 0x39, 0xc0, 0xed, 0xf6,
|
||||
0x01, 0xde, 0xb2, 0xe2, 0xeb, 0x53, 0xad, 0xf4, 0x5a, 0x26, 0xfe, 0x59, 0x4f, 0x70, 0xdb, 0x7a,
|
||||
0x9f, 0x60, 0xd4, 0x71, 0x36, 0x47, 0xf1, 0x71, 0x57, 0x71, 0xbd, 0xab, 0x28, 0x7d, 0xb6, 0x24,
|
||||
0x9d, 0x2f, 0xf5, 0xae, 0x45, 0xb3, 0x05, 0xbb, 0x9e, 0x7b, 0x77, 0x6e, 0x1c, 0xa8, 0x76, 0xe3,
|
||||
0x40, 0x1d, 0x5a, 0x7f, 0x75, 0x32, 0x5e, 0x20, 0xbf, 0x09, 0x86, 0x12, 0xe9, 0x75, 0x5f, 0x37,
|
||||
0x35, 0xb2, 0xaa, 0xfe, 0xad, 0xe1, 0x19, 0xac, 0xde, 0x78, 0x06, 0xd0, 0x36, 0x8c, 0x9b, 0x87,
|
||||
0xc0, 0x9f, 0x12, 0x1c, 0x12, 0xa6, 0xde, 0x96, 0xd5, 0x26, 0xff, 0x4e, 0xa4, 0xd1, 0x5d, 0x18,
|
||||
0x34, 0x29, 0xe5, 0xf3, 0x3a, 0xf1, 0xea, 0x14, 0x1e, 0x51, 0x16, 0xb9, 0xd3, 0x32, 0x23, 0x2c,
|
||||
0x21, 0x61, 0x44, 0x98, 0x7b, 0x81, 0xcf, 0x59, 0x1c, 0xc8, 0x27, 0x3b, 0x57, 0x13, 0x9f, 0xb9,
|
||||
0x51, 0xcc, 0xa7, 0xc5, 0x79, 0x15, 0xee, 0xb6, 0xc0, 0xbb, 0x12, 0xbc, 0x23, 0xc1, 0x3b, 0x11,
|
||||
0x55, 0xff, 0x07, 0xce, 0x0d, 0x91, 0x79, 0xf6, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x4e, 0x9b, 0x6e,
|
||||
0xa7, 0x3e, 0x06, 0x00, 0x00,
|
||||
}
|
chaincode/vendor/github.com/hyperledger/fabric-protos-go/common/configuration.pb.go (generated, vendored, 328 lines added)
@ -0,0 +1,328 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: common/configuration.proto
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// HashingAlgorithm is encoded into the configuration transaction as a
|
||||
// configuration item of type Chain with a Key of "HashingAlgorithm" and a
|
||||
// Value of HashingAlgorithm as marshaled protobuf bytes
|
||||
type HashingAlgorithm struct {
|
||||
// SHA256 is currently the only supported and tested algorithm.
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *HashingAlgorithm) Reset() { *m = HashingAlgorithm{} }
|
||||
func (m *HashingAlgorithm) String() string { return proto.CompactTextString(m) }
|
||||
func (*HashingAlgorithm) ProtoMessage() {}
|
||||
func (*HashingAlgorithm) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_cba1ec2883858369, []int{0}
|
||||
}
|
||||
|
||||
func (m *HashingAlgorithm) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_HashingAlgorithm.Unmarshal(m, b)
|
||||
}
|
||||
func (m *HashingAlgorithm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_HashingAlgorithm.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *HashingAlgorithm) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_HashingAlgorithm.Merge(m, src)
|
||||
}
|
||||
func (m *HashingAlgorithm) XXX_Size() int {
|
||||
return xxx_messageInfo_HashingAlgorithm.Size(m)
|
||||
}
|
||||
func (m *HashingAlgorithm) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_HashingAlgorithm.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_HashingAlgorithm proto.InternalMessageInfo
|
||||
|
||||
func (m *HashingAlgorithm) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// BlockDataHashingStructure is encoded into the configuration transaction as a configuration item of
|
||||
// type Chain with a Key of "BlockDataHashingStructure" and a Value of BlockDataHashingStructure as marshaled protobuf bytes
|
||||
type BlockDataHashingStructure struct {
|
||||
// width specifies the width of the Merkle tree to use when computing the BlockDataHash
|
||||
// in order to replicate flat hashing, set this width to MAX_UINT32
|
||||
Width uint32 `protobuf:"varint,1,opt,name=width,proto3" json:"width,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *BlockDataHashingStructure) Reset() { *m = BlockDataHashingStructure{} }
|
||||
func (m *BlockDataHashingStructure) String() string { return proto.CompactTextString(m) }
|
||||
func (*BlockDataHashingStructure) ProtoMessage() {}
|
||||
func (*BlockDataHashingStructure) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_cba1ec2883858369, []int{1}
|
||||
}
|
||||
|
||||
func (m *BlockDataHashingStructure) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_BlockDataHashingStructure.Unmarshal(m, b)
|
||||
}
|
||||
func (m *BlockDataHashingStructure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_BlockDataHashingStructure.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *BlockDataHashingStructure) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_BlockDataHashingStructure.Merge(m, src)
|
||||
}
|
||||
func (m *BlockDataHashingStructure) XXX_Size() int {
|
||||
return xxx_messageInfo_BlockDataHashingStructure.Size(m)
|
||||
}
|
||||
func (m *BlockDataHashingStructure) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_BlockDataHashingStructure.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_BlockDataHashingStructure proto.InternalMessageInfo
|
||||
|
||||
func (m *BlockDataHashingStructure) GetWidth() uint32 {
|
||||
if m != nil {
|
||||
return m.Width
|
||||
}
|
||||
return 0
|
||||
}
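// Illustrative sketch, not part of the generated file: the field comment above
// notes that a width of MAX_UINT32 replicates flat hashing of the block data.
// Expressed with the math import this file already carries:
func exampleFlatHashingStructure() *BlockDataHashingStructure {
	return &BlockDataHashingStructure{Width: math.MaxUint32}
}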
|
||||
|
||||
// OrdererAddresses is encoded into the configuration transaction as a configuration item of type Chain
|
||||
// with a Key of "OrdererAddresses" and a Value of OrdererAddresses as marshaled protobuf bytes
|
||||
type OrdererAddresses struct {
|
||||
Addresses []string `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *OrdererAddresses) Reset() { *m = OrdererAddresses{} }
|
||||
func (m *OrdererAddresses) String() string { return proto.CompactTextString(m) }
|
||||
func (*OrdererAddresses) ProtoMessage() {}
|
||||
func (*OrdererAddresses) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_cba1ec2883858369, []int{2}
|
||||
}
|
||||
|
||||
func (m *OrdererAddresses) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_OrdererAddresses.Unmarshal(m, b)
|
||||
}
|
||||
func (m *OrdererAddresses) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_OrdererAddresses.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *OrdererAddresses) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_OrdererAddresses.Merge(m, src)
|
||||
}
|
||||
func (m *OrdererAddresses) XXX_Size() int {
|
||||
return xxx_messageInfo_OrdererAddresses.Size(m)
|
||||
}
|
||||
func (m *OrdererAddresses) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_OrdererAddresses.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_OrdererAddresses proto.InternalMessageInfo
|
||||
|
||||
func (m *OrdererAddresses) GetAddresses() []string {
|
||||
if m != nil {
|
||||
return m.Addresses
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Consortium represents the consortium context in which the channel was created
|
||||
type Consortium struct {
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Consortium) Reset() { *m = Consortium{} }
|
||||
func (m *Consortium) String() string { return proto.CompactTextString(m) }
|
||||
func (*Consortium) ProtoMessage() {}
|
||||
func (*Consortium) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_cba1ec2883858369, []int{3}
|
||||
}
|
||||
|
||||
func (m *Consortium) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Consortium.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Consortium) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Consortium.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Consortium) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Consortium.Merge(m, src)
|
||||
}
|
||||
func (m *Consortium) XXX_Size() int {
|
||||
return xxx_messageInfo_Consortium.Size(m)
|
||||
}
|
||||
func (m *Consortium) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Consortium.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Consortium proto.InternalMessageInfo
|
||||
|
||||
func (m *Consortium) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Capabilities message defines the capabilities a particular binary must implement
|
||||
// for that binary to be able to safely participate in the channel. The capabilities
|
||||
// message is defined at the /Channel level, the /Channel/Application level, and the
|
||||
// /Channel/Orderer level.
|
||||
//
|
||||
// The /Channel level capabilities define capabilities which both the orderer and peer
|
||||
// binaries must satisfy. These capabilities might be things like a new MSP type,
|
||||
// or a new policy type.
|
||||
//
|
||||
// The /Channel/Orderer level capabilities define capabilities which must be supported
|
||||
// by the orderer, but which have no bearing on the behavior of the peer. For instance
|
||||
// if the orderer changes the logic for how it constructs new channels, only the orderers
|
||||
// must agree on the new logic. The peers do not need to be aware of this change as
|
||||
// they only interact with the channel after it has been constructed.
|
||||
//
|
||||
// Finally, the /Channel/Application level capabilities define capabilities which the peer
|
||||
// binary must satisfy, but which have no bearing on the orderer. For instance, if the
|
||||
// peer adds a new UTXO transaction type, or changes the chaincode lifecycle requirements,
|
||||
// all peers must agree on the new logic. However, orderers never inspect transactions
|
||||
// this deeply, and therefore have no need to be aware of the change.
|
||||
//
|
||||
// The capabilities strings defined in these messages typically correspond to release
|
||||
// binary versions (e.g. "V1.1"), and are used primarily as a mechanism for a fully
|
||||
// upgraded network to switch from one set of logic to a new one.
|
||||
//
|
||||
// Although for V1.1, the orderers must be upgraded to V1.1 prior to the rest of the
|
||||
// network, going forward, because of the split between the /Channel, /Channel/Orderer
|
||||
// and /Channel/Application capabilities, it should be possible for the orderer and
|
||||
// application networks to upgrade themselves independently (with the exception of any
|
||||
// new capabilities defined at the /Channel level).
|
||||
type Capabilities struct {
|
||||
Capabilities map[string]*Capability `protobuf:"bytes,1,rep,name=capabilities,proto3" json:"capabilities,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Capabilities) Reset() { *m = Capabilities{} }
|
||||
func (m *Capabilities) String() string { return proto.CompactTextString(m) }
|
||||
func (*Capabilities) ProtoMessage() {}
|
||||
func (*Capabilities) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_cba1ec2883858369, []int{4}
|
||||
}
|
||||
|
||||
func (m *Capabilities) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Capabilities.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Capabilities) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Capabilities.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Capabilities) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Capabilities.Merge(m, src)
|
||||
}
|
||||
func (m *Capabilities) XXX_Size() int {
|
||||
return xxx_messageInfo_Capabilities.Size(m)
|
||||
}
|
||||
func (m *Capabilities) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Capabilities.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Capabilities proto.InternalMessageInfo
|
||||
|
||||
func (m *Capabilities) GetCapabilities() map[string]*Capability {
|
||||
if m != nil {
|
||||
return m.Capabilities
|
||||
}
|
||||
return nil
|
||||
}
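// Illustrative sketch, not part of the generated file: per the comments above,
// a capability's presence in the map means a binary must support it, so a
// simple membership test is enough to gate behaviour. The nil checks in the
// generated getters make this safe even on a nil *Capabilities.
func exampleRequiresCapability(c *Capabilities, name string) bool {
	_, required := c.GetCapabilities()[name]
	return required
}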
|
||||
|
||||
// Capability is an empty message for the time being. It is defined as a protobuf
|
||||
// message rather than a constant, so that we may extend capabilities with other fields
|
||||
// if the need arises in the future. For the time being, a capability being in the
|
||||
// capabilities map requires that that capability be supported.
|
||||
type Capability struct {
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Capability) Reset() { *m = Capability{} }
|
||||
func (m *Capability) String() string { return proto.CompactTextString(m) }
|
||||
func (*Capability) ProtoMessage() {}
|
||||
func (*Capability) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_cba1ec2883858369, []int{5}
|
||||
}
|
||||
|
||||
func (m *Capability) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Capability.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Capability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Capability.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Capability) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Capability.Merge(m, src)
|
||||
}
|
||||
func (m *Capability) XXX_Size() int {
|
||||
return xxx_messageInfo_Capability.Size(m)
|
||||
}
|
||||
func (m *Capability) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Capability.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Capability proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*HashingAlgorithm)(nil), "common.HashingAlgorithm")
|
||||
proto.RegisterType((*BlockDataHashingStructure)(nil), "common.BlockDataHashingStructure")
|
||||
proto.RegisterType((*OrdererAddresses)(nil), "common.OrdererAddresses")
|
||||
proto.RegisterType((*Consortium)(nil), "common.Consortium")
|
||||
proto.RegisterType((*Capabilities)(nil), "common.Capabilities")
|
||||
proto.RegisterMapType((map[string]*Capability)(nil), "common.Capabilities.CapabilitiesEntry")
|
||||
proto.RegisterType((*Capability)(nil), "common.Capability")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("common/configuration.proto", fileDescriptor_cba1ec2883858369) }
|
||||
|
||||
var fileDescriptor_cba1ec2883858369 = []byte{
|
||||
// 318 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0x41, 0x6b, 0xf2, 0x40,
|
||||
0x10, 0x86, 0x89, 0x7e, 0x0a, 0x8e, 0x7e, 0x60, 0x97, 0x1e, 0xac, 0xf4, 0x10, 0x42, 0x91, 0x5c,
|
||||
0x4c, 0x5a, 0x7b, 0x29, 0xbd, 0xa9, 0x2d, 0x94, 0x5e, 0x0a, 0x11, 0x7a, 0xe8, 0x6d, 0x93, 0xac,
|
||||
0x9b, 0xc5, 0x64, 0x57, 0x66, 0x77, 0x5b, 0xf2, 0xab, 0xfa, 0x17, 0x8b, 0x59, 0x8b, 0x8a, 0xbd,
|
||||
0xcd, 0x33, 0xf3, 0xbc, 0x93, 0x09, 0x0b, 0xe3, 0x4c, 0x55, 0x95, 0x92, 0x71, 0xa6, 0xe4, 0x5a,
|
||||
0x70, 0x8b, 0xd4, 0x08, 0x25, 0xa3, 0x2d, 0x2a, 0xa3, 0x48, 0xd7, 0xcd, 0x82, 0x09, 0x0c, 0x5f,
|
||||
0xa8, 0x2e, 0x84, 0xe4, 0xf3, 0x92, 0x2b, 0x14, 0xa6, 0xa8, 0x08, 0x81, 0x7f, 0x92, 0x56, 0x6c,
|
||||
0xe4, 0xf9, 0x5e, 0xd8, 0x4b, 0x9a, 0x3a, 0xb8, 0x83, 0xab, 0x45, 0xa9, 0xb2, 0xcd, 0x13, 0x35,
|
||||
0x74, 0x1f, 0x58, 0x19, 0xb4, 0x99, 0xb1, 0xc8, 0xc8, 0x25, 0x74, 0xbe, 0x44, 0x6e, 0x8a, 0x26,
|
||||
0xf1, 0x3f, 0x71, 0x10, 0xdc, 0xc2, 0xf0, 0x0d, 0x73, 0x86, 0x0c, 0xe7, 0x79, 0x8e, 0x4c, 0x6b,
|
||||
0xa6, 0xc9, 0x35, 0xf4, 0xe8, 0x2f, 0x8c, 0x3c, 0xbf, 0x1d, 0xf6, 0x92, 0x43, 0x23, 0xf0, 0x01,
|
||||
0x96, 0x4a, 0x6a, 0x85, 0x46, 0xd8, 0xbf, 0xcf, 0xf8, 0xf6, 0x60, 0xb0, 0xa4, 0x5b, 0x9a, 0x8a,
|
||||
0x52, 0x18, 0xc1, 0x34, 0x79, 0x85, 0x41, 0x76, 0xc4, 0xcd, 0xce, 0xfe, 0x6c, 0x12, 0xb9, 0xdf,
|
||||
0x8b, 0x8e, 0xdd, 0x13, 0x78, 0x96, 0x06, 0xeb, 0xe4, 0x24, 0x3b, 0x5e, 0xc1, 0xc5, 0x99, 0x42,
|
||||
0x86, 0xd0, 0xde, 0xb0, 0x7a, 0x7f, 0xc4, 0xae, 0x24, 0x21, 0x74, 0x3e, 0x69, 0x69, 0xd9, 0xa8,
|
||||
0xe5, 0x7b, 0x61, 0x7f, 0x46, 0xce, 0xbe, 0x55, 0x27, 0x4e, 0x78, 0x6c, 0x3d, 0x78, 0xc1, 0x00,
|
||||
0xe0, 0x30, 0x58, 0xbc, 0xc3, 0x8d, 0x42, 0x1e, 0x15, 0xf5, 0x96, 0x61, 0xc9, 0x72, 0xce, 0x30,
|
||||
0x5a, 0xd3, 0x14, 0x45, 0xe6, 0x9e, 0x45, 0xef, 0x77, 0x7d, 0x44, 0x5c, 0x98, 0xc2, 0xa6, 0x3b,
|
||||
0x8c, 0x8f, 0xe4, 0xd8, 0xc9, 0x53, 0x27, 0x4f, 0xb9, 0x8a, 0x9d, 0x9f, 0x76, 0x9b, 0xce, 0xfd,
|
||||
0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3b, 0xe7, 0x4b, 0x89, 0xf3, 0x01, 0x00, 0x00,
|
||||
}
|
@ -0,0 +1,155 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: common/ledger.proto
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// Contains information about the blockchain ledger such as height, current
|
||||
// block hash, and previous block hash.
|
||||
type BlockchainInfo struct {
|
||||
Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"`
|
||||
CurrentBlockHash []byte `protobuf:"bytes,2,opt,name=currentBlockHash,proto3" json:"currentBlockHash,omitempty"`
|
||||
PreviousBlockHash []byte `protobuf:"bytes,3,opt,name=previousBlockHash,proto3" json:"previousBlockHash,omitempty"`
|
||||
// Specifies bootstrapping snapshot info if the channel is bootstrapped from a snapshot.
|
||||
// It is nil if the channel is not bootstrapped from a snapshot.
|
||||
BootstrappingSnapshotInfo *BootstrappingSnapshotInfo `protobuf:"bytes,4,opt,name=bootstrappingSnapshotInfo,proto3" json:"bootstrappingSnapshotInfo,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *BlockchainInfo) Reset() { *m = BlockchainInfo{} }
|
||||
func (m *BlockchainInfo) String() string { return proto.CompactTextString(m) }
|
||||
func (*BlockchainInfo) ProtoMessage() {}
|
||||
func (*BlockchainInfo) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_da3410306adbea27, []int{0}
|
||||
}
|
||||
|
||||
func (m *BlockchainInfo) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_BlockchainInfo.Unmarshal(m, b)
|
||||
}
|
||||
func (m *BlockchainInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_BlockchainInfo.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *BlockchainInfo) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_BlockchainInfo.Merge(m, src)
|
||||
}
|
||||
func (m *BlockchainInfo) XXX_Size() int {
|
||||
return xxx_messageInfo_BlockchainInfo.Size(m)
|
||||
}
|
||||
func (m *BlockchainInfo) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_BlockchainInfo.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_BlockchainInfo proto.InternalMessageInfo
|
||||
|
||||
func (m *BlockchainInfo) GetHeight() uint64 {
|
||||
if m != nil {
|
||||
return m.Height
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *BlockchainInfo) GetCurrentBlockHash() []byte {
|
||||
if m != nil {
|
||||
return m.CurrentBlockHash
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *BlockchainInfo) GetPreviousBlockHash() []byte {
|
||||
if m != nil {
|
||||
return m.PreviousBlockHash
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *BlockchainInfo) GetBootstrappingSnapshotInfo() *BootstrappingSnapshotInfo {
|
||||
if m != nil {
|
||||
return m.BootstrappingSnapshotInfo
|
||||
}
|
||||
return nil
|
||||
}
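// Illustrative sketch, not part of the generated file: the field comment above
// says BootstrappingSnapshotInfo is nil unless the channel was bootstrapped
// from a snapshot, so callers can branch directly on the getter.
func exampleBootstrappedFromSnapshot(info *BlockchainInfo) bool {
	return info.GetBootstrappingSnapshotInfo() != nil
}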
|
||||
|
||||
// Contains information for the bootstrapping snapshot.
|
||||
type BootstrappingSnapshotInfo struct {
|
||||
LastBlockInSnapshot uint64 `protobuf:"varint,1,opt,name=lastBlockInSnapshot,proto3" json:"lastBlockInSnapshot,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *BootstrappingSnapshotInfo) Reset() { *m = BootstrappingSnapshotInfo{} }
|
||||
func (m *BootstrappingSnapshotInfo) String() string { return proto.CompactTextString(m) }
|
||||
func (*BootstrappingSnapshotInfo) ProtoMessage() {}
|
||||
func (*BootstrappingSnapshotInfo) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_da3410306adbea27, []int{1}
|
||||
}
|
||||
|
||||
func (m *BootstrappingSnapshotInfo) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_BootstrappingSnapshotInfo.Unmarshal(m, b)
|
||||
}
|
||||
func (m *BootstrappingSnapshotInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_BootstrappingSnapshotInfo.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *BootstrappingSnapshotInfo) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_BootstrappingSnapshotInfo.Merge(m, src)
|
||||
}
|
||||
func (m *BootstrappingSnapshotInfo) XXX_Size() int {
|
||||
return xxx_messageInfo_BootstrappingSnapshotInfo.Size(m)
|
||||
}
|
||||
func (m *BootstrappingSnapshotInfo) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_BootstrappingSnapshotInfo.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_BootstrappingSnapshotInfo proto.InternalMessageInfo
|
||||
|
||||
func (m *BootstrappingSnapshotInfo) GetLastBlockInSnapshot() uint64 {
|
||||
if m != nil {
|
||||
return m.LastBlockInSnapshot
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*BlockchainInfo)(nil), "common.BlockchainInfo")
|
||||
proto.RegisterType((*BootstrappingSnapshotInfo)(nil), "common.BootstrappingSnapshotInfo")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("common/ledger.proto", fileDescriptor_da3410306adbea27) }
|
||||
|
||||
var fileDescriptor_da3410306adbea27 = []byte{
|
||||
// 252 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x90, 0xc1, 0x4a, 0xc4, 0x30,
|
||||
0x10, 0x86, 0x89, 0x2e, 0x3d, 0x44, 0x11, 0xcd, 0x82, 0x74, 0x6f, 0x75, 0xf1, 0x50, 0xc4, 0x4d,
|
||||
0x45, 0xdf, 0xa0, 0x27, 0xf7, 0xe0, 0xa5, 0x82, 0x07, 0x2f, 0x92, 0xc6, 0x6c, 0x12, 0xec, 0x66,
|
||||
0xc2, 0x24, 0x15, 0x7c, 0x5e, 0x5f, 0x44, 0xb6, 0xa9, 0x28, 0xec, 0xf6, 0x38, 0xff, 0xff, 0x4d,
|
||||
0xf8, 0x32, 0x74, 0x2e, 0x61, 0xbb, 0x05, 0x57, 0x75, 0xea, 0x5d, 0x2b, 0xe4, 0x1e, 0x21, 0x02,
|
||||
0xcb, 0x52, 0xb8, 0xfc, 0x26, 0xf4, 0xac, 0xee, 0x40, 0x7e, 0x48, 0x23, 0xac, 0x5b, 0xbb, 0x0d,
|
||||
0xb0, 0x4b, 0x9a, 0x19, 0x65, 0xb5, 0x89, 0x39, 0x29, 0x48, 0x39, 0x6b, 0xc6, 0x89, 0xdd, 0xd0,
|
||||
0x73, 0xd9, 0x23, 0x2a, 0x17, 0x87, 0x85, 0x47, 0x11, 0x4c, 0x7e, 0x54, 0x90, 0xf2, 0xb4, 0xd9,
|
||||
0xcb, 0xd9, 0x2d, 0xbd, 0xf0, 0xa8, 0x3e, 0x2d, 0xf4, 0xe1, 0x0f, 0x3e, 0x1e, 0xe0, 0xfd, 0x82,
|
||||
0xbd, 0xd1, 0x45, 0x0b, 0x10, 0x43, 0x44, 0xe1, 0xbd, 0x75, 0xfa, 0xd9, 0x09, 0x1f, 0x0c, 0xc4,
|
||||
0x9d, 0x4e, 0x3e, 0x2b, 0x48, 0x79, 0x72, 0x7f, 0xc5, 0x93, 0x30, 0xaf, 0xa7, 0xc0, 0x66, 0xfa,
|
||||
0x8d, 0xe5, 0x13, 0x5d, 0x4c, 0xee, 0xb1, 0x3b, 0x3a, 0xef, 0x44, 0x48, 0xf2, 0x6b, 0xf7, 0x5b,
|
||||
0x8d, 0x9f, 0x3f, 0x54, 0xd5, 0x2f, 0xf4, 0x1a, 0x50, 0x73, 0xf3, 0xe5, 0x15, 0x8e, 0x57, 0xdd,
|
||||
0x88, 0x16, 0xad, 0x4c, 0xc7, 0x0d, 0xa3, 0xeb, 0x2b, 0xd7, 0x36, 0x9a, 0xbe, 0xdd, 0x8d, 0xd5,
|
||||
0x3f, 0xb8, 0x4a, 0xf0, 0x2a, 0xc1, 0x2b, 0x0d, 0x55, 0xe2, 0xdb, 0x6c, 0x48, 0x1e, 0x7e, 0x02,
|
||||
0x00, 0x00, 0xff, 0xff, 0x34, 0xc4, 0xfd, 0x0b, 0xb2, 0x01, 0x00, 0x00,
|
||||
}
|
chaincode/vendor/github.com/hyperledger/fabric-protos-go/common/policies.pb.go (generated, vendored, 510 lines added)
@ -0,0 +1,510 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: common/policies.proto
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
msp "github.com/hyperledger/fabric-protos-go/msp"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
type Policy_PolicyType int32
|
||||
|
||||
const (
|
||||
Policy_UNKNOWN Policy_PolicyType = 0
|
||||
Policy_SIGNATURE Policy_PolicyType = 1
|
||||
Policy_MSP Policy_PolicyType = 2
|
||||
Policy_IMPLICIT_META Policy_PolicyType = 3
|
||||
)
|
||||
|
||||
var Policy_PolicyType_name = map[int32]string{
|
||||
0: "UNKNOWN",
|
||||
1: "SIGNATURE",
|
||||
2: "MSP",
|
||||
3: "IMPLICIT_META",
|
||||
}
|
||||
|
||||
var Policy_PolicyType_value = map[string]int32{
|
||||
"UNKNOWN": 0,
|
||||
"SIGNATURE": 1,
|
||||
"MSP": 2,
|
||||
"IMPLICIT_META": 3,
|
||||
}
|
||||
|
||||
func (x Policy_PolicyType) String() string {
|
||||
return proto.EnumName(Policy_PolicyType_name, int32(x))
|
||||
}
|
||||
|
||||
func (Policy_PolicyType) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_0d02cf0d453425a3, []int{0, 0}
|
||||
}
|
||||
|
||||
type ImplicitMetaPolicy_Rule int32
|
||||
|
||||
const (
|
||||
ImplicitMetaPolicy_ANY ImplicitMetaPolicy_Rule = 0
|
||||
ImplicitMetaPolicy_ALL ImplicitMetaPolicy_Rule = 1
|
||||
ImplicitMetaPolicy_MAJORITY ImplicitMetaPolicy_Rule = 2
|
||||
)
|
||||
|
||||
var ImplicitMetaPolicy_Rule_name = map[int32]string{
|
||||
0: "ANY",
|
||||
1: "ALL",
|
||||
2: "MAJORITY",
|
||||
}
|
||||
|
||||
var ImplicitMetaPolicy_Rule_value = map[string]int32{
|
||||
"ANY": 0,
|
||||
"ALL": 1,
|
||||
"MAJORITY": 2,
|
||||
}
|
||||
|
||||
func (x ImplicitMetaPolicy_Rule) String() string {
|
||||
return proto.EnumName(ImplicitMetaPolicy_Rule_name, int32(x))
|
||||
}
|
||||
|
||||
func (ImplicitMetaPolicy_Rule) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_0d02cf0d453425a3, []int{3, 0}
|
||||
}
|
||||
|
||||
// Policy expresses a policy which the orderer can evaluate. Because there has been some desire expressed to support
|
||||
// multiple policy engines, this is typed as a oneof for now
|
||||
type Policy struct {
|
||||
Type int32 `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"`
|
||||
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Policy) Reset() { *m = Policy{} }
|
||||
func (m *Policy) String() string { return proto.CompactTextString(m) }
|
||||
func (*Policy) ProtoMessage() {}
|
||||
func (*Policy) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_0d02cf0d453425a3, []int{0}
|
||||
}
|
||||
|
||||
func (m *Policy) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Policy.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Policy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Policy.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Policy) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Policy.Merge(m, src)
|
||||
}
|
||||
func (m *Policy) XXX_Size() int {
|
||||
return xxx_messageInfo_Policy.Size(m)
|
||||
}
|
||||
func (m *Policy) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Policy.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Policy proto.InternalMessageInfo
|
||||
|
||||
func (m *Policy) GetType() int32 {
|
||||
if m != nil {
|
||||
return m.Type
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Policy) GetValue() []byte {
|
||||
if m != nil {
|
||||
return m.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SignaturePolicyEnvelope wraps a SignaturePolicy and includes a version for future enhancements
|
||||
type SignaturePolicyEnvelope struct {
|
||||
Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"`
|
||||
Rule *SignaturePolicy `protobuf:"bytes,2,opt,name=rule,proto3" json:"rule,omitempty"`
|
||||
Identities []*msp.MSPPrincipal `protobuf:"bytes,3,rep,name=identities,proto3" json:"identities,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SignaturePolicyEnvelope) Reset() { *m = SignaturePolicyEnvelope{} }
|
||||
func (m *SignaturePolicyEnvelope) String() string { return proto.CompactTextString(m) }
|
||||
func (*SignaturePolicyEnvelope) ProtoMessage() {}
|
||||
func (*SignaturePolicyEnvelope) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_0d02cf0d453425a3, []int{1}
|
||||
}
|
||||
|
||||
func (m *SignaturePolicyEnvelope) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_SignaturePolicyEnvelope.Unmarshal(m, b)
|
||||
}
|
||||
func (m *SignaturePolicyEnvelope) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_SignaturePolicyEnvelope.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *SignaturePolicyEnvelope) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_SignaturePolicyEnvelope.Merge(m, src)
|
||||
}
|
||||
func (m *SignaturePolicyEnvelope) XXX_Size() int {
|
||||
return xxx_messageInfo_SignaturePolicyEnvelope.Size(m)
|
||||
}
|
||||
func (m *SignaturePolicyEnvelope) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_SignaturePolicyEnvelope.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_SignaturePolicyEnvelope proto.InternalMessageInfo
|
||||
|
||||
func (m *SignaturePolicyEnvelope) GetVersion() int32 {
|
||||
if m != nil {
|
||||
return m.Version
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *SignaturePolicyEnvelope) GetRule() *SignaturePolicy {
|
||||
if m != nil {
|
||||
return m.Rule
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *SignaturePolicyEnvelope) GetIdentities() []*msp.MSPPrincipal {
|
||||
if m != nil {
|
||||
return m.Identities
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SignaturePolicy is a recursive message structure which defines a featherweight DSL for describing
|
||||
// policies which are more complicated than 'exactly this signature'. The NOutOf operator is sufficient
|
||||
// to express AND as well as OR, as well as of course N out of the following M policies
|
||||
// SignedBy implies that the signature is from a valid certificate which is signed by the trusted
|
||||
// authority specified in the bytes. This will be the certificate itself for a self-signed certificate
|
||||
// and will be the CA for more traditional certificates
|
||||
type SignaturePolicy struct {
|
||||
// Types that are valid to be assigned to Type:
|
||||
// *SignaturePolicy_SignedBy
|
||||
// *SignaturePolicy_NOutOf_
|
||||
Type isSignaturePolicy_Type `protobuf_oneof:"Type"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SignaturePolicy) Reset() { *m = SignaturePolicy{} }
|
||||
func (m *SignaturePolicy) String() string { return proto.CompactTextString(m) }
|
||||
func (*SignaturePolicy) ProtoMessage() {}
|
||||
func (*SignaturePolicy) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_0d02cf0d453425a3, []int{2}
|
||||
}
|
||||
|
||||
func (m *SignaturePolicy) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_SignaturePolicy.Unmarshal(m, b)
|
||||
}
|
||||
func (m *SignaturePolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_SignaturePolicy.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *SignaturePolicy) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_SignaturePolicy.Merge(m, src)
|
||||
}
|
||||
func (m *SignaturePolicy) XXX_Size() int {
|
||||
return xxx_messageInfo_SignaturePolicy.Size(m)
|
||||
}
|
||||
func (m *SignaturePolicy) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_SignaturePolicy.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_SignaturePolicy proto.InternalMessageInfo
|
||||
|
||||
type isSignaturePolicy_Type interface {
|
||||
isSignaturePolicy_Type()
|
||||
}
|
||||
|
||||
type SignaturePolicy_SignedBy struct {
|
||||
SignedBy int32 `protobuf:"varint,1,opt,name=signed_by,json=signedBy,proto3,oneof"`
|
||||
}
|
||||
|
||||
type SignaturePolicy_NOutOf_ struct {
|
||||
NOutOf *SignaturePolicy_NOutOf `protobuf:"bytes,2,opt,name=n_out_of,json=nOutOf,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*SignaturePolicy_SignedBy) isSignaturePolicy_Type() {}
|
||||
|
||||
func (*SignaturePolicy_NOutOf_) isSignaturePolicy_Type() {}
|
||||
|
||||
func (m *SignaturePolicy) GetType() isSignaturePolicy_Type {
|
||||
if m != nil {
|
||||
return m.Type
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *SignaturePolicy) GetSignedBy() int32 {
|
||||
if x, ok := m.GetType().(*SignaturePolicy_SignedBy); ok {
|
||||
return x.SignedBy
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *SignaturePolicy) GetNOutOf() *SignaturePolicy_NOutOf {
|
||||
if x, ok := m.GetType().(*SignaturePolicy_NOutOf_); ok {
|
||||
return x.NOutOf
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// XXX_OneofWrappers is for the internal use of the proto package.
|
||||
func (*SignaturePolicy) XXX_OneofWrappers() []interface{} {
|
||||
return []interface{}{
|
||||
(*SignaturePolicy_SignedBy)(nil),
|
||||
(*SignaturePolicy_NOutOf_)(nil),
|
||||
}
|
||||
}
|
||||
|
||||
type SignaturePolicy_NOutOf struct {
|
||||
N int32 `protobuf:"varint,1,opt,name=n,proto3" json:"n,omitempty"`
|
||||
Rules []*SignaturePolicy `protobuf:"bytes,2,rep,name=rules,proto3" json:"rules,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SignaturePolicy_NOutOf) Reset() { *m = SignaturePolicy_NOutOf{} }
|
||||
func (m *SignaturePolicy_NOutOf) String() string { return proto.CompactTextString(m) }
|
||||
func (*SignaturePolicy_NOutOf) ProtoMessage() {}
|
||||
func (*SignaturePolicy_NOutOf) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_0d02cf0d453425a3, []int{2, 0}
|
||||
}
|
||||
|
||||
func (m *SignaturePolicy_NOutOf) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_SignaturePolicy_NOutOf.Unmarshal(m, b)
|
||||
}
|
||||
func (m *SignaturePolicy_NOutOf) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_SignaturePolicy_NOutOf.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *SignaturePolicy_NOutOf) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_SignaturePolicy_NOutOf.Merge(m, src)
|
||||
}
|
||||
func (m *SignaturePolicy_NOutOf) XXX_Size() int {
|
||||
return xxx_messageInfo_SignaturePolicy_NOutOf.Size(m)
|
||||
}
|
||||
func (m *SignaturePolicy_NOutOf) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_SignaturePolicy_NOutOf.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_SignaturePolicy_NOutOf proto.InternalMessageInfo
|
||||
|
||||
func (m *SignaturePolicy_NOutOf) GetN() int32 {
|
||||
if m != nil {
|
||||
return m.N
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *SignaturePolicy_NOutOf) GetRules() []*SignaturePolicy {
|
||||
if m != nil {
|
||||
return m.Rules
|
||||
}
|
||||
return nil
|
||||
}
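// Illustrative sketch, not part of the generated file: the NOutOf operator
// described above subsumes AND (N == len(Rules)) and OR (N == 1). A "2 of 3"
// rule over the identities at indices 0, 1 and 2 of the enclosing
// SignaturePolicyEnvelope could be assembled like this:
func exampleTwoOfThreeRule() *SignaturePolicy {
	signedBy := func(i int32) *SignaturePolicy {
		return &SignaturePolicy{Type: &SignaturePolicy_SignedBy{SignedBy: i}}
	}
	return &SignaturePolicy{
		Type: &SignaturePolicy_NOutOf_{
			NOutOf: &SignaturePolicy_NOutOf{
				N:     2,
				Rules: []*SignaturePolicy{signedBy(0), signedBy(1), signedBy(2)},
			},
		},
	}
}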
|
||||
|
||||
// ImplicitMetaPolicy is a policy type which depends on the hierarchical nature of the configuration
|
||||
// It is implicit because the rule is generated implicitly based on the number of sub-policies
|
||||
// It is meta because it depends only on the result of other policies
|
||||
// When evaluated, this policy iterates over all immediate child sub-groups, retrieves the policy
|
||||
// of name sub_policy, evaluates the collection and applies the rule.
|
||||
// For example, with 4 sub-groups, and a policy name of "foo", ImplicitMetaPolicy retrieves
|
||||
// each sub-group, retrieves policy "foo" for each subgroup, evaluates it, and, in the case of ANY
|
||||
// one satisfied policy is sufficient, ALL would require all 4 to be satisfied, and MAJORITY would require 3.
|
||||
type ImplicitMetaPolicy struct {
|
||||
SubPolicy string `protobuf:"bytes,1,opt,name=sub_policy,json=subPolicy,proto3" json:"sub_policy,omitempty"`
|
||||
Rule ImplicitMetaPolicy_Rule `protobuf:"varint,2,opt,name=rule,proto3,enum=common.ImplicitMetaPolicy_Rule" json:"rule,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ImplicitMetaPolicy) Reset() { *m = ImplicitMetaPolicy{} }
|
||||
func (m *ImplicitMetaPolicy) String() string { return proto.CompactTextString(m) }
|
||||
func (*ImplicitMetaPolicy) ProtoMessage() {}
|
||||
func (*ImplicitMetaPolicy) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_0d02cf0d453425a3, []int{3}
|
||||
}
|
||||
|
||||
func (m *ImplicitMetaPolicy) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ImplicitMetaPolicy.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ImplicitMetaPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ImplicitMetaPolicy.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ImplicitMetaPolicy) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ImplicitMetaPolicy.Merge(m, src)
|
||||
}
|
||||
func (m *ImplicitMetaPolicy) XXX_Size() int {
|
||||
return xxx_messageInfo_ImplicitMetaPolicy.Size(m)
|
||||
}
|
||||
func (m *ImplicitMetaPolicy) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ImplicitMetaPolicy.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ImplicitMetaPolicy proto.InternalMessageInfo
|
||||
|
||||
func (m *ImplicitMetaPolicy) GetSubPolicy() string {
|
||||
if m != nil {
|
||||
return m.SubPolicy
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ImplicitMetaPolicy) GetRule() ImplicitMetaPolicy_Rule {
|
||||
if m != nil {
|
||||
return m.Rule
|
||||
}
|
||||
return ImplicitMetaPolicy_ANY
|
||||
}
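// Illustrative sketch, not part of the generated file: following the worked
// example in the comment above, a rule requiring a MAJORITY of the sub-groups'
// "Admins" policies can be marshaled into a Policy of type IMPLICIT_META. The
// "Admins" sub-policy name is an assumed example value.
func exampleMajorityAdmins() (*Policy, error) {
	imp := &ImplicitMetaPolicy{
		SubPolicy: "Admins",
		Rule:      ImplicitMetaPolicy_MAJORITY,
	}
	value, err := proto.Marshal(imp)
	if err != nil {
		return nil, err
	}
	return &Policy{Type: int32(Policy_IMPLICIT_META), Value: value}, nil
}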
|
||||
|
||||
// ApplicationPolicy captures the different policy types that
|
||||
// are set and evaluated at the application level.
|
||||
//
|
||||
// Deprecated: Do not use.
|
||||
type ApplicationPolicy struct {
|
||||
// Types that are valid to be assigned to Type:
|
||||
// *ApplicationPolicy_SignaturePolicy
|
||||
// *ApplicationPolicy_ChannelConfigPolicyReference
|
||||
Type isApplicationPolicy_Type `protobuf_oneof:"Type"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ApplicationPolicy) Reset() { *m = ApplicationPolicy{} }
|
||||
func (m *ApplicationPolicy) String() string { return proto.CompactTextString(m) }
|
||||
func (*ApplicationPolicy) ProtoMessage() {}
|
||||
func (*ApplicationPolicy) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_0d02cf0d453425a3, []int{4}
|
||||
}
|
||||
|
||||
func (m *ApplicationPolicy) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ApplicationPolicy.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ApplicationPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ApplicationPolicy.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ApplicationPolicy) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ApplicationPolicy.Merge(m, src)
|
||||
}
|
||||
func (m *ApplicationPolicy) XXX_Size() int {
|
||||
return xxx_messageInfo_ApplicationPolicy.Size(m)
|
||||
}
|
||||
func (m *ApplicationPolicy) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ApplicationPolicy.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ApplicationPolicy proto.InternalMessageInfo
|
||||
|
||||
type isApplicationPolicy_Type interface {
|
||||
isApplicationPolicy_Type()
|
||||
}
|
||||
|
||||
type ApplicationPolicy_SignaturePolicy struct {
|
||||
SignaturePolicy *SignaturePolicyEnvelope `protobuf:"bytes,1,opt,name=signature_policy,json=signaturePolicy,proto3,oneof"`
|
||||
}
|
||||
|
||||
type ApplicationPolicy_ChannelConfigPolicyReference struct {
|
||||
ChannelConfigPolicyReference string `protobuf:"bytes,2,opt,name=channel_config_policy_reference,json=channelConfigPolicyReference,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*ApplicationPolicy_SignaturePolicy) isApplicationPolicy_Type() {}
|
||||
|
||||
func (*ApplicationPolicy_ChannelConfigPolicyReference) isApplicationPolicy_Type() {}
|
||||
|
||||
func (m *ApplicationPolicy) GetType() isApplicationPolicy_Type {
|
||||
if m != nil {
|
||||
return m.Type
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ApplicationPolicy) GetSignaturePolicy() *SignaturePolicyEnvelope {
|
||||
if x, ok := m.GetType().(*ApplicationPolicy_SignaturePolicy); ok {
|
||||
return x.SignaturePolicy
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ApplicationPolicy) GetChannelConfigPolicyReference() string {
|
||||
if x, ok := m.GetType().(*ApplicationPolicy_ChannelConfigPolicyReference); ok {
|
||||
return x.ChannelConfigPolicyReference
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// XXX_OneofWrappers is for the internal use of the proto package.
|
||||
func (*ApplicationPolicy) XXX_OneofWrappers() []interface{} {
|
||||
return []interface{}{
|
||||
(*ApplicationPolicy_SignaturePolicy)(nil),
|
||||
(*ApplicationPolicy_ChannelConfigPolicyReference)(nil),
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("common.Policy_PolicyType", Policy_PolicyType_name, Policy_PolicyType_value)
|
||||
proto.RegisterEnum("common.ImplicitMetaPolicy_Rule", ImplicitMetaPolicy_Rule_name, ImplicitMetaPolicy_Rule_value)
|
||||
proto.RegisterType((*Policy)(nil), "common.Policy")
|
||||
proto.RegisterType((*SignaturePolicyEnvelope)(nil), "common.SignaturePolicyEnvelope")
|
||||
proto.RegisterType((*SignaturePolicy)(nil), "common.SignaturePolicy")
|
||||
proto.RegisterType((*SignaturePolicy_NOutOf)(nil), "common.SignaturePolicy.NOutOf")
|
||||
proto.RegisterType((*ImplicitMetaPolicy)(nil), "common.ImplicitMetaPolicy")
|
||||
proto.RegisterType((*ApplicationPolicy)(nil), "common.ApplicationPolicy")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("common/policies.proto", fileDescriptor_0d02cf0d453425a3) }
|
||||
|
||||
var fileDescriptor_0d02cf0d453425a3 = []byte{
|
||||
// 559 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x93, 0xd1, 0x4e, 0xdb, 0x30,
|
||||
0x14, 0x86, 0xe3, 0xb6, 0x04, 0x7a, 0x0a, 0x23, 0x58, 0x4c, 0x44, 0x68, 0x1b, 0x28, 0x9a, 0x26,
|
||||
0xa4, 0x89, 0x54, 0x82, 0x5d, 0x71, 0x57, 0x58, 0x45, 0xb3, 0x35, 0x69, 0xe5, 0x96, 0x4d, 0xec,
|
||||
0x26, 0x4a, 0x82, 0x1b, 0x2c, 0xa5, 0x76, 0x14, 0x27, 0x68, 0x7d, 0x8b, 0x5d, 0xed, 0x51, 0x76,
|
||||
0xb3, 0x97, 0x9b, 0x12, 0x27, 0x5b, 0xc5, 0xd4, 0x3b, 0x9f, 0x93, 0xcf, 0xbf, 0xff, 0xff, 0xd8,
|
||||
0x81, 0x97, 0x91, 0x58, 0x2e, 0x05, 0xef, 0xa7, 0x22, 0x61, 0x11, 0xa3, 0xd2, 0x4e, 0x33, 0x91,
|
||||
0x0b, 0xac, 0xab, 0xf6, 0xf1, 0xd1, 0x52, 0xa6, 0xfd, 0xa5, 0x4c, 0xfd, 0x34, 0x63, 0x3c, 0x62,
|
||||
0x69, 0x90, 0x28, 0xc0, 0xfa, 0x0e, 0xfa, 0xb4, 0xdc, 0xb2, 0xc2, 0x18, 0x3a, 0xf9, 0x2a, 0xa5,
|
||||
0x26, 0x3a, 0x45, 0x67, 0x5b, 0xa4, 0x5a, 0xe3, 0x43, 0xd8, 0x7a, 0x0a, 0x92, 0x82, 0x9a, 0xad,
|
||||
0x53, 0x74, 0xb6, 0x4b, 0x54, 0x61, 0x7d, 0x04, 0x50, 0x7b, 0xe6, 0x25, 0xd3, 0x83, 0xed, 0x3b,
|
||||
0xef, 0xb3, 0x37, 0xf9, 0xea, 0x19, 0x1a, 0xde, 0x83, 0xee, 0xcc, 0xb9, 0xf5, 0x06, 0xf3, 0x3b,
|
||||
0x32, 0x34, 0x10, 0xde, 0x86, 0xb6, 0x3b, 0x9b, 0x1a, 0x2d, 0x7c, 0x00, 0x7b, 0x8e, 0x3b, 0x1d,
|
||||
0x3b, 0x37, 0xce, 0xdc, 0x77, 0x87, 0xf3, 0x81, 0xd1, 0xb6, 0x7e, 0x22, 0x38, 0x9a, 0xb1, 0x98,
|
||||
0x07, 0x79, 0x91, 0x51, 0xa5, 0x37, 0xe4, 0x4f, 0x34, 0x11, 0x29, 0xc5, 0x26, 0x6c, 0x3f, 0xd1,
|
||||
0x4c, 0x32, 0xc1, 0x6b, 0x3b, 0x4d, 0x89, 0xdf, 0x43, 0x27, 0x2b, 0x12, 0x65, 0xa8, 0x77, 0x71,
|
||||
0x64, 0xab, 0x7c, 0xf6, 0x33, 0x21, 0x52, 0x41, 0xf8, 0x03, 0x00, 0x7b, 0xa0, 0x3c, 0x67, 0x39,
|
||||
0xa3, 0xd2, 0x6c, 0x9f, 0xb6, 0xcf, 0x7a, 0x17, 0x87, 0xcd, 0x16, 0x77, 0x36, 0x9d, 0x36, 0xc3,
|
||||
0x20, 0x6b, 0x9c, 0xf5, 0x1b, 0xc1, 0xfe, 0x33, 0x3d, 0xfc, 0x1a, 0xba, 0x92, 0xc5, 0x9c, 0x3e,
|
||||
0xf8, 0xe1, 0x4a, 0x59, 0x1a, 0x69, 0x64, 0x47, 0xb5, 0xae, 0x57, 0xf8, 0x0a, 0x76, 0xb8, 0x2f,
|
||||
0x8a, 0xdc, 0x17, 0x8b, 0xda, 0xd9, 0x9b, 0x0d, 0xce, 0x6c, 0x6f, 0x52, 0xe4, 0x93, 0xc5, 0x48,
|
||||
0x23, 0x3a, 0xaf, 0x56, 0xc7, 0x43, 0xd0, 0x55, 0x0f, 0xef, 0x02, 0x6a, 0xf2, 0x22, 0x8e, 0xcf,
|
||||
0x61, 0xab, 0x0c, 0x21, 0xcd, 0x56, 0xe5, 0x7b, 0x63, 0x54, 0x45, 0x5d, 0xeb, 0xd0, 0x29, 0xaf,
|
||||
0xc3, 0xfa, 0x81, 0x00, 0x3b, 0xcb, 0xb4, 0x7c, 0x05, 0xb9, 0x4b, 0xf3, 0xe0, 0x6f, 0x00, 0x90,
|
||||
0x45, 0xe8, 0x57, 0xcf, 0x43, 0x25, 0xe8, 0x92, 0xae, 0x2c, 0xc2, 0xfa, 0xf3, 0xe5, 0xda, 0x58,
|
||||
0x5f, 0x5c, 0x9c, 0x34, 0x67, 0xfd, 0x2f, 0x64, 0x93, 0x22, 0xa1, 0x6a, 0xbc, 0xd6, 0x3b, 0xe8,
|
||||
0x94, 0x55, 0x79, 0xcb, 0x03, 0xef, 0xde, 0xd0, 0xaa, 0xc5, 0x78, 0x6c, 0x20, 0xbc, 0x0b, 0x3b,
|
||||
0xee, 0xe0, 0xd3, 0x84, 0x38, 0xf3, 0x7b, 0xa3, 0x65, 0xfd, 0x42, 0x70, 0x30, 0x48, 0x4b, 0xa5,
|
||||
0x20, 0x67, 0x82, 0xd7, 0x47, 0x8e, 0xc1, 0x90, 0x4d, 0x94, 0x75, 0x5f, 0xbd, 0x7f, 0xc7, 0x6f,
|
||||
0x78, 0x1e, 0x23, 0x8d, 0xec, 0xcb, 0x67, 0x17, 0x74, 0x0b, 0x27, 0xd1, 0x63, 0xc0, 0x39, 0x4d,
|
||||
0xfc, 0x48, 0xf0, 0x05, 0x8b, 0x6b, 0x49, 0x3f, 0xa3, 0x0b, 0x9a, 0x51, 0x1e, 0xa9, 0x6c, 0xdd,
|
||||
0x91, 0x46, 0x5e, 0xd5, 0xe0, 0x4d, 0xc5, 0xd5, 0x53, 0x6c, 0xa8, 0xab, 0x96, 0x89, 0x9a, 0x59,
|
||||
0x5e, 0x7f, 0x81, 0xb7, 0x22, 0x8b, 0xed, 0xc7, 0x55, 0x4a, 0xb3, 0x84, 0x3e, 0xc4, 0x34, 0xb3,
|
||||
0x17, 0x41, 0x98, 0xb1, 0x48, 0xfd, 0x3c, 0xb2, 0xf6, 0xf9, 0xcd, 0x8e, 0x59, 0xfe, 0x58, 0x84,
|
||||
0x65, 0xd9, 0x5f, 0x83, 0xfb, 0x0a, 0x3e, 0x57, 0xf0, 0x79, 0x2c, 0xfa, 0x8a, 0x0f, 0xf5, 0xaa,
|
||||
0x73, 0xf9, 0x27, 0x00, 0x00, 0xff, 0xff, 0x77, 0x84, 0xc8, 0xc7, 0xb5, 0x03, 0x00, 0x00,
|
||||
}
|
@ -0,0 +1,175 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: ledger/queryresult/kv_query_result.proto
|
||||
|
||||
package queryresult
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
timestamp "github.com/golang/protobuf/ptypes/timestamp"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// KV -- QueryResult for range/execute query. Holds a key and corresponding value.
|
||||
type KV struct {
|
||||
Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
|
||||
Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
|
||||
Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *KV) Reset() { *m = KV{} }
|
||||
func (m *KV) String() string { return proto.CompactTextString(m) }
|
||||
func (*KV) ProtoMessage() {}
|
||||
func (*KV) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_f8ee2fe66594a8f2, []int{0}
|
||||
}
|
||||
|
||||
func (m *KV) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_KV.Unmarshal(m, b)
|
||||
}
|
||||
func (m *KV) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_KV.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *KV) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_KV.Merge(m, src)
|
||||
}
|
||||
func (m *KV) XXX_Size() int {
|
||||
return xxx_messageInfo_KV.Size(m)
|
||||
}
|
||||
func (m *KV) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_KV.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_KV proto.InternalMessageInfo
|
||||
|
||||
func (m *KV) GetNamespace() string {
|
||||
if m != nil {
|
||||
return m.Namespace
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *KV) GetKey() string {
|
||||
if m != nil {
|
||||
return m.Key
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *KV) GetValue() []byte {
|
||||
if m != nil {
|
||||
return m.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// KeyModification -- QueryResult for history query. Holds a transaction ID, value,
|
||||
// timestamp, and delete marker which resulted from a history query.
|
||||
type KeyModification struct {
|
||||
TxId string `protobuf:"bytes,1,opt,name=tx_id,json=txId,proto3" json:"tx_id,omitempty"`
|
||||
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||
Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
|
||||
IsDelete bool `protobuf:"varint,4,opt,name=is_delete,json=isDelete,proto3" json:"is_delete,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *KeyModification) Reset() { *m = KeyModification{} }
|
||||
func (m *KeyModification) String() string { return proto.CompactTextString(m) }
|
||||
func (*KeyModification) ProtoMessage() {}
|
||||
func (*KeyModification) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_f8ee2fe66594a8f2, []int{1}
|
||||
}
|
||||
|
||||
func (m *KeyModification) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_KeyModification.Unmarshal(m, b)
|
||||
}
|
||||
func (m *KeyModification) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_KeyModification.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *KeyModification) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_KeyModification.Merge(m, src)
|
||||
}
|
||||
func (m *KeyModification) XXX_Size() int {
|
||||
return xxx_messageInfo_KeyModification.Size(m)
|
||||
}
|
||||
func (m *KeyModification) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_KeyModification.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_KeyModification proto.InternalMessageInfo
|
||||
|
||||
func (m *KeyModification) GetTxId() string {
|
||||
if m != nil {
|
||||
return m.TxId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *KeyModification) GetValue() []byte {
|
||||
if m != nil {
|
||||
return m.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *KeyModification) GetTimestamp() *timestamp.Timestamp {
|
||||
if m != nil {
|
||||
return m.Timestamp
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *KeyModification) GetIsDelete() bool {
|
||||
if m != nil {
|
||||
return m.IsDelete
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*KV)(nil), "queryresult.KV")
|
||||
proto.RegisterType((*KeyModification)(nil), "queryresult.KeyModification")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("ledger/queryresult/kv_query_result.proto", fileDescriptor_f8ee2fe66594a8f2)
|
||||
}
|
||||
|
||||
var fileDescriptor_f8ee2fe66594a8f2 = []byte{
|
||||
// 290 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x91, 0x4f, 0x4b, 0xc3, 0x30,
|
||||
0x18, 0xc6, 0xe9, 0xfe, 0xc8, 0x9a, 0x09, 0x4a, 0xf4, 0x50, 0xa6, 0x60, 0xd9, 0xa9, 0x97, 0x25,
|
||||
0xa2, 0x17, 0xf1, 0x28, 0x5e, 0x74, 0x78, 0x29, 0xe2, 0xc1, 0x4b, 0x49, 0xdb, 0xb7, 0x59, 0x58,
|
||||
0xdb, 0xd4, 0x24, 0x1d, 0xeb, 0xe7, 0xf0, 0x0b, 0x8b, 0xc9, 0x66, 0x0b, 0xde, 0xf2, 0xbc, 0xef,
|
||||
0xf3, 0x7b, 0x78, 0x78, 0x83, 0xa2, 0x12, 0x72, 0x0e, 0x8a, 0x7e, 0xb5, 0xa0, 0x3a, 0x05, 0xba,
|
||||
0x2d, 0x0d, 0xdd, 0xee, 0x12, 0x2b, 0x13, 0xa7, 0x49, 0xa3, 0xa4, 0x91, 0x78, 0x3e, 0xb0, 0x2c,
|
||||
0x6e, 0xb8, 0x94, 0xbc, 0x04, 0x6a, 0x57, 0x69, 0x5b, 0x50, 0x23, 0x2a, 0xd0, 0x86, 0x55, 0x8d,
|
||||
0x73, 0x2f, 0x5f, 0xd1, 0x68, 0xfd, 0x81, 0xaf, 0x91, 0x5f, 0xb3, 0x0a, 0x74, 0xc3, 0x32, 0x08,
|
||||
0xbc, 0xd0, 0x8b, 0xfc, 0xb8, 0x1f, 0xe0, 0x73, 0x34, 0xde, 0x42, 0x17, 0x8c, 0xec, 0xfc, 0xf7,
|
||||
0x89, 0x2f, 0xd1, 0x74, 0xc7, 0xca, 0x16, 0x82, 0x71, 0xe8, 0x45, 0xa7, 0xb1, 0x13, 0xcb, 0x6f,
|
||||
0x0f, 0x9d, 0xad, 0xa1, 0x7b, 0x93, 0xb9, 0x28, 0x44, 0xc6, 0x8c, 0x90, 0x35, 0xbe, 0x40, 0x53,
|
||||
0xb3, 0x4f, 0x44, 0x7e, 0x48, 0x9d, 0x98, 0xfd, 0x4b, 0xde, 0xe3, 0xa3, 0x01, 0x8e, 0x1f, 0x90,
|
||||
0xff, 0xd7, 0xce, 0x06, 0xcf, 0xef, 0x16, 0xc4, 0xf5, 0x27, 0xc7, 0xfe, 0xe4, 0xfd, 0xe8, 0x88,
|
||||
0x7b, 0x33, 0xbe, 0x42, 0xbe, 0xd0, 0x49, 0x0e, 0x25, 0x18, 0x08, 0x26, 0xa1, 0x17, 0xcd, 0xe2,
|
||||
0x99, 0xd0, 0xcf, 0x56, 0x3f, 0xd5, 0xe8, 0x56, 0x2a, 0x4e, 0x36, 0x5d, 0x03, 0xca, 0x1d, 0x91,
|
||||
0x14, 0x2c, 0x55, 0x22, 0x73, 0xa1, 0x9a, 0x1c, 0x86, 0x83, 0xb3, 0x7d, 0x3e, 0x72, 0x61, 0x36,
|
||||
0x6d, 0x4a, 0x32, 0x59, 0xd1, 0x01, 0x48, 0x1d, 0xb8, 0x72, 0xe0, 0x8a, 0x4b, 0xfa, 0xff, 0x57,
|
||||
0xd2, 0x13, 0xbb, 0xbd, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x07, 0x6d, 0xd7, 0x47, 0xb2, 0x01,
|
||||
0x00, 0x00,
|
||||
}
|
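The generated KV and KeyModification messages above are what Fabric's ledger query APIs return for range and history queries. Below is a minimal, illustrative sketch, not part of the vendored file: the namespace, key, values, and transaction id are made-up placeholders, and the surrounding program is assumed. It only shows that these types are ordinary proto messages that round-trip through the golang/protobuf API imported above.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes"
	"github.com/hyperledger/fabric-protos-go/ledger/queryresult"
)

func main() {
	// Build a KV result as a peer would return it for a range/execute query.
	kv := &queryresult.KV{
		Namespace: "mycc",    // placeholder chaincode namespace
		Key:       "asset1",  // placeholder key
		Value:     []byte(`{"owner":"alice"}`),
	}

	// Round-trip through proto.Marshal / proto.Unmarshal.
	raw, err := proto.Marshal(kv)
	if err != nil {
		panic(err)
	}
	var decoded queryresult.KV
	if err := proto.Unmarshal(raw, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.GetNamespace(), decoded.GetKey())

	// A history entry carries a transaction id, value, timestamp, and delete marker.
	mod := &queryresult.KeyModification{
		TxId:      "tx42", // placeholder transaction id
		Value:     []byte("old value"),
		Timestamp: ptypes.TimestampNow(),
		IsDelete:  false,
	}
	fmt.Println(mod.GetTxId(), mod.GetIsDelete())
}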
392
chaincode/vendor/github.com/hyperledger/fabric-protos-go/ledger/rwset/rwset.pb.go
generated
vendored
@ -0,0 +1,392 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: ledger/rwset/rwset.proto
|
||||
|
||||
package rwset
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
type TxReadWriteSet_DataModel int32
|
||||
|
||||
const (
|
||||
TxReadWriteSet_KV TxReadWriteSet_DataModel = 0
|
||||
)
|
||||
|
||||
var TxReadWriteSet_DataModel_name = map[int32]string{
|
||||
0: "KV",
|
||||
}
|
||||
|
||||
var TxReadWriteSet_DataModel_value = map[string]int32{
|
||||
"KV": 0,
|
||||
}
|
||||
|
||||
func (x TxReadWriteSet_DataModel) String() string {
|
||||
return proto.EnumName(TxReadWriteSet_DataModel_name, int32(x))
|
||||
}
|
||||
|
||||
func (TxReadWriteSet_DataModel) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_794d00b812408f20, []int{0, 0}
|
||||
}
|
||||
|
||||
// TxReadWriteSet encapsulates a read-write set for a transaction
|
||||
// DataModel specifies the enum value of the data model
|
||||
// ns_rwset field specifies a list of chaincode specific read-write set (one for each chaincode)
|
||||
type TxReadWriteSet struct {
|
||||
DataModel TxReadWriteSet_DataModel `protobuf:"varint,1,opt,name=data_model,json=dataModel,proto3,enum=rwset.TxReadWriteSet_DataModel" json:"data_model,omitempty"`
|
||||
NsRwset []*NsReadWriteSet `protobuf:"bytes,2,rep,name=ns_rwset,json=nsRwset,proto3" json:"ns_rwset,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *TxReadWriteSet) Reset() { *m = TxReadWriteSet{} }
|
||||
func (m *TxReadWriteSet) String() string { return proto.CompactTextString(m) }
|
||||
func (*TxReadWriteSet) ProtoMessage() {}
|
||||
func (*TxReadWriteSet) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_794d00b812408f20, []int{0}
|
||||
}
|
||||
|
||||
func (m *TxReadWriteSet) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_TxReadWriteSet.Unmarshal(m, b)
|
||||
}
|
||||
func (m *TxReadWriteSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_TxReadWriteSet.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *TxReadWriteSet) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_TxReadWriteSet.Merge(m, src)
|
||||
}
|
||||
func (m *TxReadWriteSet) XXX_Size() int {
|
||||
return xxx_messageInfo_TxReadWriteSet.Size(m)
|
||||
}
|
||||
func (m *TxReadWriteSet) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_TxReadWriteSet.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_TxReadWriteSet proto.InternalMessageInfo
|
||||
|
||||
func (m *TxReadWriteSet) GetDataModel() TxReadWriteSet_DataModel {
|
||||
if m != nil {
|
||||
return m.DataModel
|
||||
}
|
||||
return TxReadWriteSet_KV
|
||||
}
|
||||
|
||||
func (m *TxReadWriteSet) GetNsRwset() []*NsReadWriteSet {
|
||||
if m != nil {
|
||||
return m.NsRwset
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NsReadWriteSet encapsulates the read-write set for a chaincode
|
||||
type NsReadWriteSet struct {
|
||||
Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
|
||||
Rwset []byte `protobuf:"bytes,2,opt,name=rwset,proto3" json:"rwset,omitempty"`
|
||||
CollectionHashedRwset []*CollectionHashedReadWriteSet `protobuf:"bytes,3,rep,name=collection_hashed_rwset,json=collectionHashedRwset,proto3" json:"collection_hashed_rwset,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *NsReadWriteSet) Reset() { *m = NsReadWriteSet{} }
|
||||
func (m *NsReadWriteSet) String() string { return proto.CompactTextString(m) }
|
||||
func (*NsReadWriteSet) ProtoMessage() {}
|
||||
func (*NsReadWriteSet) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_794d00b812408f20, []int{1}
|
||||
}
|
||||
|
||||
func (m *NsReadWriteSet) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_NsReadWriteSet.Unmarshal(m, b)
|
||||
}
|
||||
func (m *NsReadWriteSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_NsReadWriteSet.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *NsReadWriteSet) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_NsReadWriteSet.Merge(m, src)
|
||||
}
|
||||
func (m *NsReadWriteSet) XXX_Size() int {
|
||||
return xxx_messageInfo_NsReadWriteSet.Size(m)
|
||||
}
|
||||
func (m *NsReadWriteSet) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_NsReadWriteSet.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_NsReadWriteSet proto.InternalMessageInfo
|
||||
|
||||
func (m *NsReadWriteSet) GetNamespace() string {
|
||||
if m != nil {
|
||||
return m.Namespace
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *NsReadWriteSet) GetRwset() []byte {
|
||||
if m != nil {
|
||||
return m.Rwset
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *NsReadWriteSet) GetCollectionHashedRwset() []*CollectionHashedReadWriteSet {
|
||||
if m != nil {
|
||||
return m.CollectionHashedRwset
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CollectionHashedReadWriteSet encapsulate the hashed representation for the private read-write set for a collection
|
||||
type CollectionHashedReadWriteSet struct {
|
||||
CollectionName string `protobuf:"bytes,1,opt,name=collection_name,json=collectionName,proto3" json:"collection_name,omitempty"`
|
||||
HashedRwset []byte `protobuf:"bytes,2,opt,name=hashed_rwset,json=hashedRwset,proto3" json:"hashed_rwset,omitempty"`
|
||||
PvtRwsetHash []byte `protobuf:"bytes,3,opt,name=pvt_rwset_hash,json=pvtRwsetHash,proto3" json:"pvt_rwset_hash,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *CollectionHashedReadWriteSet) Reset() { *m = CollectionHashedReadWriteSet{} }
|
||||
func (m *CollectionHashedReadWriteSet) String() string { return proto.CompactTextString(m) }
|
||||
func (*CollectionHashedReadWriteSet) ProtoMessage() {}
|
||||
func (*CollectionHashedReadWriteSet) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_794d00b812408f20, []int{2}
|
||||
}
|
||||
|
||||
func (m *CollectionHashedReadWriteSet) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_CollectionHashedReadWriteSet.Unmarshal(m, b)
|
||||
}
|
||||
func (m *CollectionHashedReadWriteSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_CollectionHashedReadWriteSet.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *CollectionHashedReadWriteSet) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_CollectionHashedReadWriteSet.Merge(m, src)
|
||||
}
|
||||
func (m *CollectionHashedReadWriteSet) XXX_Size() int {
|
||||
return xxx_messageInfo_CollectionHashedReadWriteSet.Size(m)
|
||||
}
|
||||
func (m *CollectionHashedReadWriteSet) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_CollectionHashedReadWriteSet.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_CollectionHashedReadWriteSet proto.InternalMessageInfo
|
||||
|
||||
func (m *CollectionHashedReadWriteSet) GetCollectionName() string {
|
||||
if m != nil {
|
||||
return m.CollectionName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *CollectionHashedReadWriteSet) GetHashedRwset() []byte {
|
||||
if m != nil {
|
||||
return m.HashedRwset
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *CollectionHashedReadWriteSet) GetPvtRwsetHash() []byte {
|
||||
if m != nil {
|
||||
return m.PvtRwsetHash
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// TxPvtReadWriteSet encapsulate the private read-write set for a transaction
|
||||
type TxPvtReadWriteSet struct {
|
||||
DataModel TxReadWriteSet_DataModel `protobuf:"varint,1,opt,name=data_model,json=dataModel,proto3,enum=rwset.TxReadWriteSet_DataModel" json:"data_model,omitempty"`
|
||||
NsPvtRwset []*NsPvtReadWriteSet `protobuf:"bytes,2,rep,name=ns_pvt_rwset,json=nsPvtRwset,proto3" json:"ns_pvt_rwset,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *TxPvtReadWriteSet) Reset() { *m = TxPvtReadWriteSet{} }
|
||||
func (m *TxPvtReadWriteSet) String() string { return proto.CompactTextString(m) }
|
||||
func (*TxPvtReadWriteSet) ProtoMessage() {}
|
||||
func (*TxPvtReadWriteSet) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_794d00b812408f20, []int{3}
|
||||
}
|
||||
|
||||
func (m *TxPvtReadWriteSet) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_TxPvtReadWriteSet.Unmarshal(m, b)
|
||||
}
|
||||
func (m *TxPvtReadWriteSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_TxPvtReadWriteSet.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *TxPvtReadWriteSet) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_TxPvtReadWriteSet.Merge(m, src)
|
||||
}
|
||||
func (m *TxPvtReadWriteSet) XXX_Size() int {
|
||||
return xxx_messageInfo_TxPvtReadWriteSet.Size(m)
|
||||
}
|
||||
func (m *TxPvtReadWriteSet) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_TxPvtReadWriteSet.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_TxPvtReadWriteSet proto.InternalMessageInfo
|
||||
|
||||
func (m *TxPvtReadWriteSet) GetDataModel() TxReadWriteSet_DataModel {
|
||||
if m != nil {
|
||||
return m.DataModel
|
||||
}
|
||||
return TxReadWriteSet_KV
|
||||
}
|
||||
|
||||
func (m *TxPvtReadWriteSet) GetNsPvtRwset() []*NsPvtReadWriteSet {
|
||||
if m != nil {
|
||||
return m.NsPvtRwset
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NsPvtReadWriteSet encapsulates the private read-write set for a chaincode
|
||||
type NsPvtReadWriteSet struct {
|
||||
Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
|
||||
CollectionPvtRwset []*CollectionPvtReadWriteSet `protobuf:"bytes,2,rep,name=collection_pvt_rwset,json=collectionPvtRwset,proto3" json:"collection_pvt_rwset,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *NsPvtReadWriteSet) Reset() { *m = NsPvtReadWriteSet{} }
|
||||
func (m *NsPvtReadWriteSet) String() string { return proto.CompactTextString(m) }
|
||||
func (*NsPvtReadWriteSet) ProtoMessage() {}
|
||||
func (*NsPvtReadWriteSet) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_794d00b812408f20, []int{4}
|
||||
}
|
||||
|
||||
func (m *NsPvtReadWriteSet) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_NsPvtReadWriteSet.Unmarshal(m, b)
|
||||
}
|
||||
func (m *NsPvtReadWriteSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_NsPvtReadWriteSet.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *NsPvtReadWriteSet) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_NsPvtReadWriteSet.Merge(m, src)
|
||||
}
|
||||
func (m *NsPvtReadWriteSet) XXX_Size() int {
|
||||
return xxx_messageInfo_NsPvtReadWriteSet.Size(m)
|
||||
}
|
||||
func (m *NsPvtReadWriteSet) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_NsPvtReadWriteSet.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_NsPvtReadWriteSet proto.InternalMessageInfo
|
||||
|
||||
func (m *NsPvtReadWriteSet) GetNamespace() string {
|
||||
if m != nil {
|
||||
return m.Namespace
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *NsPvtReadWriteSet) GetCollectionPvtRwset() []*CollectionPvtReadWriteSet {
|
||||
if m != nil {
|
||||
return m.CollectionPvtRwset
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CollectionPvtReadWriteSet encapsulates the private read-write set for a collection
|
||||
type CollectionPvtReadWriteSet struct {
|
||||
CollectionName string `protobuf:"bytes,1,opt,name=collection_name,json=collectionName,proto3" json:"collection_name,omitempty"`
|
||||
Rwset []byte `protobuf:"bytes,2,opt,name=rwset,proto3" json:"rwset,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *CollectionPvtReadWriteSet) Reset() { *m = CollectionPvtReadWriteSet{} }
|
||||
func (m *CollectionPvtReadWriteSet) String() string { return proto.CompactTextString(m) }
|
||||
func (*CollectionPvtReadWriteSet) ProtoMessage() {}
|
||||
func (*CollectionPvtReadWriteSet) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_794d00b812408f20, []int{5}
|
||||
}
|
||||
|
||||
func (m *CollectionPvtReadWriteSet) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_CollectionPvtReadWriteSet.Unmarshal(m, b)
|
||||
}
|
||||
func (m *CollectionPvtReadWriteSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_CollectionPvtReadWriteSet.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *CollectionPvtReadWriteSet) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_CollectionPvtReadWriteSet.Merge(m, src)
|
||||
}
|
||||
func (m *CollectionPvtReadWriteSet) XXX_Size() int {
|
||||
return xxx_messageInfo_CollectionPvtReadWriteSet.Size(m)
|
||||
}
|
||||
func (m *CollectionPvtReadWriteSet) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_CollectionPvtReadWriteSet.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_CollectionPvtReadWriteSet proto.InternalMessageInfo
|
||||
|
||||
func (m *CollectionPvtReadWriteSet) GetCollectionName() string {
|
||||
if m != nil {
|
||||
return m.CollectionName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *CollectionPvtReadWriteSet) GetRwset() []byte {
|
||||
if m != nil {
|
||||
return m.Rwset
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("rwset.TxReadWriteSet_DataModel", TxReadWriteSet_DataModel_name, TxReadWriteSet_DataModel_value)
|
||||
proto.RegisterType((*TxReadWriteSet)(nil), "rwset.TxReadWriteSet")
|
||||
proto.RegisterType((*NsReadWriteSet)(nil), "rwset.NsReadWriteSet")
|
||||
proto.RegisterType((*CollectionHashedReadWriteSet)(nil), "rwset.CollectionHashedReadWriteSet")
|
||||
proto.RegisterType((*TxPvtReadWriteSet)(nil), "rwset.TxPvtReadWriteSet")
|
||||
proto.RegisterType((*NsPvtReadWriteSet)(nil), "rwset.NsPvtReadWriteSet")
|
||||
proto.RegisterType((*CollectionPvtReadWriteSet)(nil), "rwset.CollectionPvtReadWriteSet")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("ledger/rwset/rwset.proto", fileDescriptor_794d00b812408f20) }
|
||||
|
||||
var fileDescriptor_794d00b812408f20 = []byte{
|
||||
// 421 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x93, 0xcf, 0xef, 0x93, 0x30,
|
||||
0x18, 0xc6, 0xed, 0x77, 0xd9, 0x94, 0x77, 0x04, 0x5d, 0xdd, 0x22, 0x26, 0x4b, 0x9c, 0xd3, 0xc4,
|
||||
0xc5, 0x64, 0x60, 0xa6, 0x27, 0x0f, 0x1e, 0xd4, 0x83, 0x89, 0x71, 0x31, 0x75, 0xd1, 0x64, 0x1e,
|
||||
0x48, 0x81, 0x0a, 0x24, 0x40, 0x09, 0xad, 0x73, 0xfe, 0x01, 0x9e, 0xf5, 0xe6, 0xd9, 0xff, 0xd4,
|
||||
0xac, 0x65, 0x0c, 0x98, 0xbf, 0x0e, 0x5e, 0x08, 0x7d, 0xfb, 0x3c, 0x3c, 0x9f, 0xf6, 0xe5, 0x05,
|
||||
0x3b, 0x65, 0x61, 0xc4, 0x4a, 0xb7, 0xfc, 0x24, 0x98, 0xd4, 0x4f, 0xa7, 0x28, 0xb9, 0xe4, 0xb8,
|
||||
0xaf, 0x16, 0xf3, 0xef, 0x08, 0xac, 0xcd, 0x9e, 0x30, 0x1a, 0xbe, 0x2b, 0x13, 0xc9, 0xde, 0x30,
|
||||
0x89, 0x9f, 0x00, 0x84, 0x54, 0x52, 0x2f, 0xe3, 0x21, 0x4b, 0x6d, 0x34, 0x43, 0x0b, 0x6b, 0x75,
|
||||
0xcb, 0xd1, 0xde, 0xb6, 0xd4, 0x79, 0x4e, 0x25, 0x7d, 0x75, 0x90, 0x11, 0x23, 0x3c, 0xbe, 0xe2,
|
||||
0x07, 0x70, 0x25, 0x17, 0x9e, 0xd2, 0xdb, 0x17, 0xb3, 0xde, 0x62, 0xb8, 0x9a, 0x54, 0xee, 0xb5,
|
||||
0x68, 0xba, 0xc9, 0xe5, 0x5c, 0x10, 0x05, 0x71, 0x1d, 0x8c, 0xfa, 0x4b, 0x78, 0x00, 0x17, 0x2f,
|
||||
0xdf, 0x5e, 0xbb, 0x34, 0xff, 0x81, 0xc0, 0x6a, 0x1b, 0xf0, 0x14, 0x8c, 0x9c, 0x66, 0x4c, 0x14,
|
||||
0x34, 0x60, 0x0a, 0xcc, 0x20, 0xa7, 0x02, 0x1e, 0x43, 0xff, 0x18, 0x8a, 0x16, 0x26, 0xd1, 0x0b,
|
||||
0xfc, 0x1e, 0x6e, 0x04, 0x3c, 0x4d, 0x59, 0x20, 0x13, 0x9e, 0x7b, 0x31, 0x15, 0x31, 0x0b, 0x2b,
|
||||
0xb8, 0x9e, 0x82, 0xbb, 0x53, 0xc1, 0x3d, 0xab, 0x55, 0x2f, 0x94, 0xa8, 0x85, 0x3a, 0x09, 0xba,
|
||||
0xbb, 0x0a, 0xfc, 0x1b, 0x82, 0xe9, 0x9f, 0x7c, 0xf8, 0x1e, 0x5c, 0x6d, 0xa4, 0x1f, 0x58, 0x2b,
|
||||
0x6e, 0xeb, 0x54, 0x5e, 0xd3, 0x8c, 0xe1, 0xdb, 0x60, 0xb6, 0xd8, 0xf4, 0x19, 0x86, 0xf1, 0x29,
|
||||
0x0c, 0xdf, 0x05, 0xab, 0xd8, 0x49, 0xbd, 0xaf, 0x0e, 0x62, 0xf7, 0x94, 0xc8, 0x2c, 0x76, 0x52,
|
||||
0x29, 0x0e, 0xf9, 0xf3, 0xaf, 0x08, 0x46, 0x9b, 0xfd, 0xeb, 0x9d, 0xfc, 0xaf, 0x3d, 0x7d, 0x0c,
|
||||
0x66, 0x2e, 0xbc, 0x3a, 0xbe, 0xea, 0xab, 0x5d, 0xf7, 0xb5, 0x93, 0x47, 0x20, 0x57, 0x25, 0x75,
|
||||
0x49, 0x5f, 0x10, 0x8c, 0xce, 0x14, 0x7f, 0xe9, 0x25, 0x81, 0x71, 0xe3, 0xde, 0xba, 0xb9, 0xb3,
|
||||
0xb3, 0x96, 0x75, 0xf3, 0x71, 0xd0, 0xda, 0x52, 0x1c, 0x5b, 0xb8, 0xf9, 0x5b, 0xc3, 0xbf, 0x37,
|
||||
0xea, 0x97, 0x7f, 0xd9, 0x53, 0x1f, 0xee, 0xf3, 0x32, 0x72, 0xe2, 0xcf, 0x05, 0x2b, 0xf5, 0xc8,
|
||||
0x39, 0x1f, 0xa8, 0x5f, 0x26, 0x81, 0x9e, 0x36, 0xe1, 0x54, 0x45, 0xa5, 0xde, 0x3e, 0x8a, 0x12,
|
||||
0x19, 0x7f, 0xf4, 0x9d, 0x80, 0x67, 0x6e, 0xc3, 0xe2, 0x6a, 0xcb, 0x52, 0x5b, 0x96, 0x11, 0x77,
|
||||
0x9b, 0xd3, 0xeb, 0x0f, 0x54, 0xfd, 0xe1, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x47, 0x32, 0x8f,
|
||||
0x91, 0xd4, 0x03, 0x00, 0x00,
|
||||
}
|
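The rwset messages above describe how a transaction's read-write set is organized: one TxReadWriteSet per transaction, holding one NsReadWriteSet per chaincode namespace, with the per-namespace set carried as opaque bytes. The sketch below is illustrative only (the namespace and the serialized rwset bytes are placeholders; in practice the bytes would be a marshaled KVRWSet produced elsewhere).

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/hyperledger/fabric-protos-go/ledger/rwset"
)

func main() {
	nsRWSetBytes := []byte("opaque-kvrwset-bytes") // placeholder for a marshaled per-namespace rwset

	txRWSet := &rwset.TxReadWriteSet{
		DataModel: rwset.TxReadWriteSet_KV, // KV is the only value defined by the enum above
		NsRwset: []*rwset.NsReadWriteSet{
			{
				Namespace: "mycc", // placeholder chaincode namespace
				Rwset:     nsRWSetBytes,
			},
		},
	}

	raw, err := proto.Marshal(txRWSet)
	if err != nil {
		panic(err)
	}
	fmt.Printf("tx rwset: %d bytes, model=%s\n", len(raw), txRWSet.GetDataModel())
}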
184
chaincode/vendor/github.com/hyperledger/fabric-protos-go/msp/identities.pb.go
generated
vendored
@ -0,0 +1,184 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: msp/identities.proto
|
||||
|
||||
package msp
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// This struct represents an Identity
|
||||
// (with its MSP identifier) to be used
|
||||
// to serialize it and deserialize it
|
||||
type SerializedIdentity struct {
|
||||
// The identifier of the associated membership service provider
|
||||
Mspid string `protobuf:"bytes,1,opt,name=mspid,proto3" json:"mspid,omitempty"`
|
||||
// the Identity, serialized according to the rules of its MSP
|
||||
IdBytes []byte `protobuf:"bytes,2,opt,name=id_bytes,json=idBytes,proto3" json:"id_bytes,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SerializedIdentity) Reset() { *m = SerializedIdentity{} }
|
||||
func (m *SerializedIdentity) String() string { return proto.CompactTextString(m) }
|
||||
func (*SerializedIdentity) ProtoMessage() {}
|
||||
func (*SerializedIdentity) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_872f7fc14bf2c238, []int{0}
|
||||
}
|
||||
|
||||
func (m *SerializedIdentity) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_SerializedIdentity.Unmarshal(m, b)
|
||||
}
|
||||
func (m *SerializedIdentity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_SerializedIdentity.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *SerializedIdentity) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_SerializedIdentity.Merge(m, src)
|
||||
}
|
||||
func (m *SerializedIdentity) XXX_Size() int {
|
||||
return xxx_messageInfo_SerializedIdentity.Size(m)
|
||||
}
|
||||
func (m *SerializedIdentity) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_SerializedIdentity.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_SerializedIdentity proto.InternalMessageInfo
|
||||
|
||||
func (m *SerializedIdentity) GetMspid() string {
|
||||
if m != nil {
|
||||
return m.Mspid
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *SerializedIdentity) GetIdBytes() []byte {
|
||||
if m != nil {
|
||||
return m.IdBytes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// This struct represents an Idemix Identity
|
||||
// to be used to serialize it and deserialize it.
|
||||
// The IdemixMSP will first serialize an idemix identity to bytes using
|
||||
// this proto, and then use these bytes as id_bytes in SerializedIdentity
|
||||
type SerializedIdemixIdentity struct {
|
||||
// nym_x is the X-component of the pseudonym elliptic curve point.
|
||||
// It is a []byte representation of an amcl.BIG
|
||||
// The pseudonym can be seen as a public key of the identity; it is used to verify signatures.
|
||||
NymX []byte `protobuf:"bytes,1,opt,name=nym_x,json=nymX,proto3" json:"nym_x,omitempty"`
|
||||
// nym_y is the Y-component of the pseudonym elliptic curve point.
|
||||
// It is a []byte representation of an amcl.BIG
|
||||
// The pseudonym can be seen as a public key of the identity; it is used to verify signatures.
|
||||
NymY []byte `protobuf:"bytes,2,opt,name=nym_y,json=nymY,proto3" json:"nym_y,omitempty"`
|
||||
// ou contains the organizational unit of the idemix identity
|
||||
Ou []byte `protobuf:"bytes,3,opt,name=ou,proto3" json:"ou,omitempty"`
|
||||
// role contains the role of this identity (e.g., ADMIN or MEMBER)
|
||||
Role []byte `protobuf:"bytes,4,opt,name=role,proto3" json:"role,omitempty"`
|
||||
// proof contains the cryptographic evidence that this identity is valid
|
||||
Proof []byte `protobuf:"bytes,5,opt,name=proof,proto3" json:"proof,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SerializedIdemixIdentity) Reset() { *m = SerializedIdemixIdentity{} }
|
||||
func (m *SerializedIdemixIdentity) String() string { return proto.CompactTextString(m) }
|
||||
func (*SerializedIdemixIdentity) ProtoMessage() {}
|
||||
func (*SerializedIdemixIdentity) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_872f7fc14bf2c238, []int{1}
|
||||
}
|
||||
|
||||
func (m *SerializedIdemixIdentity) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_SerializedIdemixIdentity.Unmarshal(m, b)
|
||||
}
|
||||
func (m *SerializedIdemixIdentity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_SerializedIdemixIdentity.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *SerializedIdemixIdentity) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_SerializedIdemixIdentity.Merge(m, src)
|
||||
}
|
||||
func (m *SerializedIdemixIdentity) XXX_Size() int {
|
||||
return xxx_messageInfo_SerializedIdemixIdentity.Size(m)
|
||||
}
|
||||
func (m *SerializedIdemixIdentity) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_SerializedIdemixIdentity.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_SerializedIdemixIdentity proto.InternalMessageInfo
|
||||
|
||||
func (m *SerializedIdemixIdentity) GetNymX() []byte {
|
||||
if m != nil {
|
||||
return m.NymX
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *SerializedIdemixIdentity) GetNymY() []byte {
|
||||
if m != nil {
|
||||
return m.NymY
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *SerializedIdemixIdentity) GetOu() []byte {
|
||||
if m != nil {
|
||||
return m.Ou
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *SerializedIdemixIdentity) GetRole() []byte {
|
||||
if m != nil {
|
||||
return m.Role
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *SerializedIdemixIdentity) GetProof() []byte {
|
||||
if m != nil {
|
||||
return m.Proof
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*SerializedIdentity)(nil), "msp.SerializedIdentity")
|
||||
proto.RegisterType((*SerializedIdemixIdentity)(nil), "msp.SerializedIdemixIdentity")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("msp/identities.proto", fileDescriptor_872f7fc14bf2c238) }
|
||||
|
||||
var fileDescriptor_872f7fc14bf2c238 = []byte{
|
||||
// 243 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x8f, 0x31, 0x4f, 0xfb, 0x30,
|
||||
0x10, 0x47, 0x95, 0x34, 0xf9, 0xff, 0xc1, 0xaa, 0x18, 0x4c, 0x07, 0xb3, 0x95, 0x4e, 0x95, 0x50,
|
||||
0x9d, 0x81, 0x6f, 0x50, 0x89, 0x81, 0x0d, 0x85, 0x05, 0x58, 0xaa, 0xa6, 0xbe, 0xa6, 0x27, 0xe5,
|
||||
0x72, 0x96, 0xed, 0x48, 0x35, 0x03, 0x9f, 0x1d, 0x25, 0x06, 0x04, 0x9b, 0x7f, 0x4f, 0x4f, 0x4f,
|
||||
0x3e, 0xb1, 0x20, 0x6f, 0x2b, 0x34, 0xd0, 0x07, 0x0c, 0x08, 0x5e, 0x5b, 0xc7, 0x81, 0xe5, 0x8c,
|
||||
0xbc, 0x5d, 0x3d, 0x08, 0xf9, 0x0c, 0x0e, 0xf7, 0x1d, 0xbe, 0x83, 0x79, 0x4c, 0x4a, 0x94, 0x0b,
|
||||
0x51, 0x92, 0xb7, 0x68, 0x54, 0xb6, 0xcc, 0xd6, 0x97, 0x75, 0x1a, 0xf2, 0x46, 0x5c, 0xa0, 0xd9,
|
||||
0x35, 0x31, 0x80, 0x57, 0xf9, 0x32, 0x5b, 0xcf, 0xeb, 0xff, 0x68, 0xb6, 0xe3, 0x5c, 0x7d, 0x08,
|
||||
0xf5, 0x27, 0x43, 0x78, 0xfe, 0x89, 0x5d, 0x8b, 0xb2, 0x8f, 0xb4, 0x3b, 0x4f, 0xb1, 0x79, 0x5d,
|
||||
0xf4, 0x91, 0x5e, 0xbe, 0x61, 0xfc, 0x0a, 0x8d, 0xf0, 0x55, 0x5e, 0x89, 0x9c, 0x07, 0x35, 0x9b,
|
||||
0x48, 0xce, 0x83, 0x94, 0xa2, 0x70, 0xdc, 0x81, 0x2a, 0x92, 0x33, 0xbe, 0xc7, 0xaf, 0x59, 0xc7,
|
||||
0x7c, 0x54, 0xe5, 0x04, 0xd3, 0xd8, 0x3e, 0x89, 0x5b, 0x76, 0xad, 0x3e, 0x45, 0x0b, 0xae, 0x03,
|
||||
0xd3, 0x82, 0xd3, 0xc7, 0x7d, 0xe3, 0xf0, 0x90, 0x6e, 0xf5, 0x9a, 0xbc, 0x7d, 0xbb, 0x6b, 0x31,
|
||||
0x9c, 0x86, 0x46, 0x1f, 0x98, 0xaa, 0x5f, 0x66, 0x95, 0xcc, 0x4d, 0x32, 0x37, 0x2d, 0x57, 0xe4,
|
||||
0x6d, 0xf3, 0x6f, 0x9a, 0xf7, 0x9f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x2d, 0xf7, 0x41, 0xf7, 0x3c,
|
||||
0x01, 0x00, 0x00,
|
||||
}
|
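SerializedIdentity above is the wire form of an identity: an MSP identifier plus the identity bytes serialized under that MSP's rules (for the default X.509 provider, a certificate PEM). The following is a minimal sketch with placeholder values (the MSP id "Org1MSP" and the PEM content are illustrative, not taken from this repository).

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/hyperledger/fabric-protos-go/msp"
)

func main() {
	// Placeholder certificate PEM; a real identity would carry an actual X.509 cert.
	certPEM := []byte("-----BEGIN CERTIFICATE-----\nplaceholder\n-----END CERTIFICATE-----\n")

	sid := &msp.SerializedIdentity{
		Mspid:   "Org1MSP", // hypothetical MSP identifier
		IdBytes: certPEM,
	}

	raw, err := proto.Marshal(sid)
	if err != nil {
		panic(err)
	}

	var decoded msp.SerializedIdentity
	if err := proto.Unmarshal(raw, &decoded); err != nil {
		panic(err)
	}
	fmt.Println("mspid:", decoded.GetMspid(), "id bytes:", len(decoded.GetIdBytes()))
}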
775
chaincode/vendor/github.com/hyperledger/fabric-protos-go/msp/msp_config.pb.go
generated
vendored
@ -0,0 +1,775 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: msp/msp_config.proto
|
||||
|
||||
package msp
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// MSPConfig collects all the configuration information for
|
||||
// an MSP. The Config field should be unmarshalled in a way
|
||||
// that depends on the Type
|
||||
type MSPConfig struct {
|
||||
// Type holds the type of the MSP; the default one would
|
||||
// be of type FABRIC implementing an X.509 based provider
|
||||
Type int32 `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"`
|
||||
// Config is MSP dependent configuration info
|
||||
Config []byte `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *MSPConfig) Reset() { *m = MSPConfig{} }
|
||||
func (m *MSPConfig) String() string { return proto.CompactTextString(m) }
|
||||
func (*MSPConfig) ProtoMessage() {}
|
||||
func (*MSPConfig) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_9c34771f529d9d1a, []int{0}
|
||||
}
|
||||
|
||||
func (m *MSPConfig) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_MSPConfig.Unmarshal(m, b)
|
||||
}
|
||||
func (m *MSPConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_MSPConfig.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *MSPConfig) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_MSPConfig.Merge(m, src)
|
||||
}
|
||||
func (m *MSPConfig) XXX_Size() int {
|
||||
return xxx_messageInfo_MSPConfig.Size(m)
|
||||
}
|
||||
func (m *MSPConfig) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_MSPConfig.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_MSPConfig proto.InternalMessageInfo
|
||||
|
||||
func (m *MSPConfig) GetType() int32 {
|
||||
if m != nil {
|
||||
return m.Type
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *MSPConfig) GetConfig() []byte {
|
||||
if m != nil {
|
||||
return m.Config
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// FabricMSPConfig collects all the configuration information for
|
||||
// a Fabric MSP.
|
||||
// Here we assume a default certificate validation policy, where
|
||||
// any certificate signed by any of the listed rootCA certs would
|
||||
// be considered as valid under this MSP.
|
||||
// This MSP may or may not come with a signing identity. If it does,
|
||||
// it can also issue signing identities. If it does not, it can only
|
||||
// be used to validate and verify certificates.
|
||||
type FabricMSPConfig struct {
|
||||
// Name holds the identifier of the MSP; MSP identifier
|
||||
// is chosen by the application that governs this MSP.
|
||||
// For example, and assuming the default implementation of MSP,
|
||||
// that is X.509-based and considers a single Issuer,
|
||||
// this can refer to the Subject OU field or the Issuer OU field.
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// List of root certificates trusted by this MSP
|
||||
// they are used upon certificate validation (see
|
||||
// comment for IntermediateCerts below)
|
||||
RootCerts [][]byte `protobuf:"bytes,2,rep,name=root_certs,json=rootCerts,proto3" json:"root_certs,omitempty"`
|
||||
// List of intermediate certificates trusted by this MSP;
|
||||
// they are used upon certificate validation as follows:
|
||||
// validation attempts to build a path from the certificate
|
||||
// to be validated (which is at one end of the path) and
|
||||
// one of the certs in the RootCerts field (which is at
|
||||
// the other end of the path). If the path is longer than
|
||||
// 2, certificates in the middle are searched within the
|
||||
// IntermediateCerts pool
|
||||
IntermediateCerts [][]byte `protobuf:"bytes,3,rep,name=intermediate_certs,json=intermediateCerts,proto3" json:"intermediate_certs,omitempty"`
|
||||
// Identity denoting the administrator of this MSP
|
||||
Admins [][]byte `protobuf:"bytes,4,rep,name=admins,proto3" json:"admins,omitempty"`
|
||||
// Identity revocation list
|
||||
RevocationList [][]byte `protobuf:"bytes,5,rep,name=revocation_list,json=revocationList,proto3" json:"revocation_list,omitempty"`
|
||||
// SigningIdentity holds information on the signing identity
|
||||
// this peer is to use, and which is to be imported by the
|
||||
// MSP defined before
|
||||
SigningIdentity *SigningIdentityInfo `protobuf:"bytes,6,opt,name=signing_identity,json=signingIdentity,proto3" json:"signing_identity,omitempty"`
|
||||
// OrganizationalUnitIdentifiers holds one or more
|
||||
// fabric organizational unit identifiers that belong to
|
||||
// this MSP configuration
|
||||
OrganizationalUnitIdentifiers []*FabricOUIdentifier `protobuf:"bytes,7,rep,name=organizational_unit_identifiers,json=organizationalUnitIdentifiers,proto3" json:"organizational_unit_identifiers,omitempty"`
|
||||
// FabricCryptoConfig contains the configuration parameters
|
||||
// for the cryptographic algorithms used by this MSP
|
||||
CryptoConfig *FabricCryptoConfig `protobuf:"bytes,8,opt,name=crypto_config,json=cryptoConfig,proto3" json:"crypto_config,omitempty"`
|
||||
// List of TLS root certificates trusted by this MSP.
|
||||
// They are returned by GetTLSRootCerts.
|
||||
TlsRootCerts [][]byte `protobuf:"bytes,9,rep,name=tls_root_certs,json=tlsRootCerts,proto3" json:"tls_root_certs,omitempty"`
|
||||
// List of TLS intermediate certificates trusted by this MSP;
|
||||
// They are returned by GetTLSIntermediateCerts.
|
||||
TlsIntermediateCerts [][]byte `protobuf:"bytes,10,rep,name=tls_intermediate_certs,json=tlsIntermediateCerts,proto3" json:"tls_intermediate_certs,omitempty"`
|
||||
// fabric_node_ous contains the configuration to distinguish clients from peers from orderers
|
||||
// based on the OUs.
|
||||
FabricNodeOus *FabricNodeOUs `protobuf:"bytes,11,opt,name=fabric_node_ous,json=fabricNodeOus,proto3" json:"fabric_node_ous,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *FabricMSPConfig) Reset() { *m = FabricMSPConfig{} }
|
||||
func (m *FabricMSPConfig) String() string { return proto.CompactTextString(m) }
|
||||
func (*FabricMSPConfig) ProtoMessage() {}
|
||||
func (*FabricMSPConfig) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_9c34771f529d9d1a, []int{1}
|
||||
}
|
||||
|
||||
func (m *FabricMSPConfig) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_FabricMSPConfig.Unmarshal(m, b)
|
||||
}
|
||||
func (m *FabricMSPConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_FabricMSPConfig.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *FabricMSPConfig) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_FabricMSPConfig.Merge(m, src)
|
||||
}
|
||||
func (m *FabricMSPConfig) XXX_Size() int {
|
||||
return xxx_messageInfo_FabricMSPConfig.Size(m)
|
||||
}
|
||||
func (m *FabricMSPConfig) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_FabricMSPConfig.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_FabricMSPConfig proto.InternalMessageInfo
|
||||
|
||||
func (m *FabricMSPConfig) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *FabricMSPConfig) GetRootCerts() [][]byte {
|
||||
if m != nil {
|
||||
return m.RootCerts
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *FabricMSPConfig) GetIntermediateCerts() [][]byte {
|
||||
if m != nil {
|
||||
return m.IntermediateCerts
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *FabricMSPConfig) GetAdmins() [][]byte {
|
||||
if m != nil {
|
||||
return m.Admins
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *FabricMSPConfig) GetRevocationList() [][]byte {
|
||||
if m != nil {
|
||||
return m.RevocationList
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *FabricMSPConfig) GetSigningIdentity() *SigningIdentityInfo {
|
||||
if m != nil {
|
||||
return m.SigningIdentity
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *FabricMSPConfig) GetOrganizationalUnitIdentifiers() []*FabricOUIdentifier {
|
||||
if m != nil {
|
||||
return m.OrganizationalUnitIdentifiers
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *FabricMSPConfig) GetCryptoConfig() *FabricCryptoConfig {
|
||||
if m != nil {
|
||||
return m.CryptoConfig
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *FabricMSPConfig) GetTlsRootCerts() [][]byte {
|
||||
if m != nil {
|
||||
return m.TlsRootCerts
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *FabricMSPConfig) GetTlsIntermediateCerts() [][]byte {
|
||||
if m != nil {
|
||||
return m.TlsIntermediateCerts
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *FabricMSPConfig) GetFabricNodeOus() *FabricNodeOUs {
|
||||
if m != nil {
|
||||
return m.FabricNodeOus
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// FabricCryptoConfig contains configuration parameters
|
||||
// for the cryptographic algorithms used by the MSP
|
||||
// this configuration refers to
|
||||
type FabricCryptoConfig struct {
|
||||
// SignatureHashFamily is a string representing the hash family to be used
|
||||
// during sign and verify operations.
|
||||
// Allowed values are "SHA2" and "SHA3".
|
||||
SignatureHashFamily string `protobuf:"bytes,1,opt,name=signature_hash_family,json=signatureHashFamily,proto3" json:"signature_hash_family,omitempty"`
|
||||
// IdentityIdentifierHashFunction is a string representing the hash function
|
||||
// to be used during the computation of the identity identifier of an MSP identity.
|
||||
// Allowed values are "SHA256", "SHA384" and "SHA3_256", "SHA3_384".
|
||||
IdentityIdentifierHashFunction string `protobuf:"bytes,2,opt,name=identity_identifier_hash_function,json=identityIdentifierHashFunction,proto3" json:"identity_identifier_hash_function,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *FabricCryptoConfig) Reset() { *m = FabricCryptoConfig{} }
|
||||
func (m *FabricCryptoConfig) String() string { return proto.CompactTextString(m) }
|
||||
func (*FabricCryptoConfig) ProtoMessage() {}
|
||||
func (*FabricCryptoConfig) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_9c34771f529d9d1a, []int{2}
|
||||
}
|
||||
|
||||
func (m *FabricCryptoConfig) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_FabricCryptoConfig.Unmarshal(m, b)
|
||||
}
|
||||
func (m *FabricCryptoConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_FabricCryptoConfig.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *FabricCryptoConfig) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_FabricCryptoConfig.Merge(m, src)
|
||||
}
|
||||
func (m *FabricCryptoConfig) XXX_Size() int {
|
||||
return xxx_messageInfo_FabricCryptoConfig.Size(m)
|
||||
}
|
||||
func (m *FabricCryptoConfig) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_FabricCryptoConfig.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_FabricCryptoConfig proto.InternalMessageInfo
|
||||
|
||||
func (m *FabricCryptoConfig) GetSignatureHashFamily() string {
|
||||
if m != nil {
|
||||
return m.SignatureHashFamily
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *FabricCryptoConfig) GetIdentityIdentifierHashFunction() string {
|
||||
if m != nil {
|
||||
return m.IdentityIdentifierHashFunction
|
||||
}
|
||||
return ""
|
||||
}
|
||||
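MSPConfig is a generic envelope: the Fabric-specific FabricMSPConfig is marshaled and carried as opaque Config bytes, with Type selecting the provider. The sketch below is illustrative only; the MSP name, root cert bytes, and the assumption that type 0 denotes the default FABRIC (X.509) provider are placeholders/assumptions, not taken from this file. The hash values follow the allowed values listed in the FabricCryptoConfig comments above.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/hyperledger/fabric-protos-go/msp"
)

func main() {
	fabricCfg := &msp.FabricMSPConfig{
		Name:      "Org1MSP",                                      // hypothetical MSP identifier
		RootCerts: [][]byte{[]byte("root CA cert PEM placeholder")}, // placeholder root cert
		CryptoConfig: &msp.FabricCryptoConfig{
			SignatureHashFamily:            "SHA2",   // allowed values per the comments above: "SHA2", "SHA3"
			IdentityIdentifierHashFunction: "SHA256", // e.g. "SHA256", "SHA384", "SHA3_256", "SHA3_384"
		},
	}

	inner, err := proto.Marshal(fabricCfg)
	if err != nil {
		panic(err)
	}

	cfg := &msp.MSPConfig{
		Type:   0, // assumption: 0 selects the default FABRIC (X.509-based) provider
		Config: inner,
	}
	fmt.Printf("msp config: type=%d, %d config bytes\n", cfg.GetType(), len(cfg.GetConfig()))
}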
|
||||
// IdemixMSPConfig collects all the configuration information for
|
||||
// an Idemix MSP.
|
||||
type IdemixMSPConfig struct {
|
||||
// Name holds the identifier of the MSP
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// ipk represents the (serialized) issuer public key
|
||||
Ipk []byte `protobuf:"bytes,2,opt,name=ipk,proto3" json:"ipk,omitempty"`
|
||||
// signer may contain crypto material to configure a default signer
|
||||
Signer *IdemixMSPSignerConfig `protobuf:"bytes,3,opt,name=signer,proto3" json:"signer,omitempty"`
|
||||
// revocation_pk is the public key used for revocation of credentials
|
||||
RevocationPk []byte `protobuf:"bytes,4,opt,name=revocation_pk,json=revocationPk,proto3" json:"revocation_pk,omitempty"`
|
||||
// epoch represents the current epoch (time interval) used for revocation
|
||||
Epoch int64 `protobuf:"varint,5,opt,name=epoch,proto3" json:"epoch,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *IdemixMSPConfig) Reset() { *m = IdemixMSPConfig{} }
|
||||
func (m *IdemixMSPConfig) String() string { return proto.CompactTextString(m) }
|
||||
func (*IdemixMSPConfig) ProtoMessage() {}
|
||||
func (*IdemixMSPConfig) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_9c34771f529d9d1a, []int{3}
|
||||
}
|
||||
|
||||
func (m *IdemixMSPConfig) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_IdemixMSPConfig.Unmarshal(m, b)
|
||||
}
|
||||
func (m *IdemixMSPConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_IdemixMSPConfig.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *IdemixMSPConfig) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_IdemixMSPConfig.Merge(m, src)
|
||||
}
|
||||
func (m *IdemixMSPConfig) XXX_Size() int {
|
||||
return xxx_messageInfo_IdemixMSPConfig.Size(m)
|
||||
}
|
||||
func (m *IdemixMSPConfig) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_IdemixMSPConfig.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_IdemixMSPConfig proto.InternalMessageInfo
|
||||
|
||||
func (m *IdemixMSPConfig) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *IdemixMSPConfig) GetIpk() []byte {
|
||||
if m != nil {
|
||||
return m.Ipk
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *IdemixMSPConfig) GetSigner() *IdemixMSPSignerConfig {
|
||||
if m != nil {
|
||||
return m.Signer
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *IdemixMSPConfig) GetRevocationPk() []byte {
|
||||
if m != nil {
|
||||
return m.RevocationPk
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *IdemixMSPConfig) GetEpoch() int64 {
|
||||
if m != nil {
|
||||
return m.Epoch
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// IdemixMSPSignerConfig contains the crypto material to set up an idemix signing identity
|
||||
type IdemixMSPSignerConfig struct {
|
||||
// cred represents the serialized idemix credential of the default signer
|
||||
Cred []byte `protobuf:"bytes,1,opt,name=cred,proto3" json:"cred,omitempty"`
|
||||
// sk is the secret key of the default signer, corresponding to credential Cred
|
||||
Sk []byte `protobuf:"bytes,2,opt,name=sk,proto3" json:"sk,omitempty"`
|
||||
// organizational_unit_identifier defines the organizational unit the default signer is in
|
||||
OrganizationalUnitIdentifier string `protobuf:"bytes,3,opt,name=organizational_unit_identifier,json=organizationalUnitIdentifier,proto3" json:"organizational_unit_identifier,omitempty"`
|
||||
// role defines whether the default signer is admin, peer, member or client
|
||||
Role int32 `protobuf:"varint,4,opt,name=role,proto3" json:"role,omitempty"`
|
||||
// enrollment_id contains the enrollment id of this signer
|
||||
EnrollmentId string `protobuf:"bytes,5,opt,name=enrollment_id,json=enrollmentId,proto3" json:"enrollment_id,omitempty"`
|
||||
// credential_revocation_information contains a serialized CredentialRevocationInformation
|
||||
CredentialRevocationInformation []byte `protobuf:"bytes,6,opt,name=credential_revocation_information,json=credentialRevocationInformation,proto3" json:"credential_revocation_information,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *IdemixMSPSignerConfig) Reset() { *m = IdemixMSPSignerConfig{} }
|
||||
func (m *IdemixMSPSignerConfig) String() string { return proto.CompactTextString(m) }
|
||||
func (*IdemixMSPSignerConfig) ProtoMessage() {}
|
||||
func (*IdemixMSPSignerConfig) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_9c34771f529d9d1a, []int{4}
|
||||
}
|
||||
|
||||
func (m *IdemixMSPSignerConfig) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_IdemixMSPSignerConfig.Unmarshal(m, b)
|
||||
}
|
||||
func (m *IdemixMSPSignerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_IdemixMSPSignerConfig.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *IdemixMSPSignerConfig) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_IdemixMSPSignerConfig.Merge(m, src)
|
||||
}
|
||||
func (m *IdemixMSPSignerConfig) XXX_Size() int {
|
||||
return xxx_messageInfo_IdemixMSPSignerConfig.Size(m)
|
||||
}
|
||||
func (m *IdemixMSPSignerConfig) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_IdemixMSPSignerConfig.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_IdemixMSPSignerConfig proto.InternalMessageInfo
|
||||
|
||||
func (m *IdemixMSPSignerConfig) GetCred() []byte {
|
||||
if m != nil {
|
||||
return m.Cred
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *IdemixMSPSignerConfig) GetSk() []byte {
|
||||
if m != nil {
|
||||
return m.Sk
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *IdemixMSPSignerConfig) GetOrganizationalUnitIdentifier() string {
|
||||
if m != nil {
|
||||
return m.OrganizationalUnitIdentifier
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *IdemixMSPSignerConfig) GetRole() int32 {
|
||||
if m != nil {
|
||||
return m.Role
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *IdemixMSPSignerConfig) GetEnrollmentId() string {
|
||||
if m != nil {
|
||||
return m.EnrollmentId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *IdemixMSPSignerConfig) GetCredentialRevocationInformation() []byte {
|
||||
if m != nil {
|
||||
return m.CredentialRevocationInformation
|
||||
}
|
||||
return nil
|
||||
}
|
||||
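Putting the two Idemix messages together: an IdemixMSPConfig carries the issuer and revocation keys plus an optional default signer described by IdemixMSPSignerConfig. The sketch below is purely illustrative; every byte slice and string is a placeholder standing in for real cryptographic material.

package main

import (
	"fmt"

	"github.com/hyperledger/fabric-protos-go/msp"
)

func main() {
	idemixCfg := &msp.IdemixMSPConfig{
		Name:         "IdemixOrgMSP",                     // hypothetical MSP name
		Ipk:          []byte("issuer public key bytes"),  // placeholder
		RevocationPk: []byte("revocation public key"),    // placeholder
		Epoch:        1,
		Signer: &msp.IdemixMSPSignerConfig{
			Cred:                         []byte("idemix credential"), // placeholder
			Sk:                           []byte("secret key"),        // placeholder
			OrganizationalUnitIdentifier: "ou1",                       // hypothetical OU
			Role:                         0,                           // role code; semantics defined by the MSP
			EnrollmentId:                 "user1",                     // hypothetical enrollment id
		},
	}
	fmt.Println(idemixCfg.GetName(), idemixCfg.GetSigner().GetEnrollmentId())
}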
|
||||
// SigningIdentityInfo represents the configuration information
|
||||
// related to the signing identity the peer is to use for generating
|
||||
// endorsements
|
||||
type SigningIdentityInfo struct {
|
||||
// PublicSigner carries the public information of the signing
|
||||
// identity. For an X.509 provider this would be represented by
|
||||
// an X.509 certificate
|
||||
PublicSigner []byte `protobuf:"bytes,1,opt,name=public_signer,json=publicSigner,proto3" json:"public_signer,omitempty"`
|
||||
// PrivateSigner denotes a reference to the private key of the
|
||||
// peer's signing identity
|
||||
PrivateSigner *KeyInfo `protobuf:"bytes,2,opt,name=private_signer,json=privateSigner,proto3" json:"private_signer,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SigningIdentityInfo) Reset() { *m = SigningIdentityInfo{} }
|
||||
func (m *SigningIdentityInfo) String() string { return proto.CompactTextString(m) }
|
||||
func (*SigningIdentityInfo) ProtoMessage() {}
|
||||
func (*SigningIdentityInfo) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_9c34771f529d9d1a, []int{5}
|
||||
}
|
||||
|
||||
func (m *SigningIdentityInfo) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_SigningIdentityInfo.Unmarshal(m, b)
|
||||
}
|
||||
func (m *SigningIdentityInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_SigningIdentityInfo.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *SigningIdentityInfo) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_SigningIdentityInfo.Merge(m, src)
|
||||
}
|
||||
func (m *SigningIdentityInfo) XXX_Size() int {
|
||||
return xxx_messageInfo_SigningIdentityInfo.Size(m)
|
||||
}
|
||||
func (m *SigningIdentityInfo) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_SigningIdentityInfo.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_SigningIdentityInfo proto.InternalMessageInfo
|
||||
|
||||
func (m *SigningIdentityInfo) GetPublicSigner() []byte {
|
||||
if m != nil {
|
||||
return m.PublicSigner
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *SigningIdentityInfo) GetPrivateSigner() *KeyInfo {
|
||||
if m != nil {
|
||||
return m.PrivateSigner
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// KeyInfo represents a (secret) key that is either already stored
|
||||
// in the bccsp/keystore or key material to be imported to the
|
||||
// bccsp key-store. In later versions it may also contain a
|
||||
// keystore identifier
|
||||
type KeyInfo struct {
|
||||
// Identifier of the key inside the default keystore; this for
|
||||
// the case of Software BCCSP as well as the HSM BCCSP would be
|
||||
// the SKI of the key
|
||||
KeyIdentifier string `protobuf:"bytes,1,opt,name=key_identifier,json=keyIdentifier,proto3" json:"key_identifier,omitempty"`
|
||||
// KeyMaterial (optional) for the key to be imported; this is
|
||||
// properly encoded key bytes, prefixed by the type of the key
|
||||
KeyMaterial []byte `protobuf:"bytes,2,opt,name=key_material,json=keyMaterial,proto3" json:"key_material,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *KeyInfo) Reset() { *m = KeyInfo{} }
|
||||
func (m *KeyInfo) String() string { return proto.CompactTextString(m) }
|
||||
func (*KeyInfo) ProtoMessage() {}
|
||||
func (*KeyInfo) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_9c34771f529d9d1a, []int{6}
|
||||
}
|
||||
|
||||
func (m *KeyInfo) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_KeyInfo.Unmarshal(m, b)
|
||||
}
|
||||
func (m *KeyInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_KeyInfo.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *KeyInfo) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_KeyInfo.Merge(m, src)
|
||||
}
|
||||
func (m *KeyInfo) XXX_Size() int {
|
||||
return xxx_messageInfo_KeyInfo.Size(m)
|
||||
}
|
||||
func (m *KeyInfo) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_KeyInfo.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_KeyInfo proto.InternalMessageInfo
|
||||
|
||||
func (m *KeyInfo) GetKeyIdentifier() string {
|
||||
if m != nil {
|
||||
return m.KeyIdentifier
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *KeyInfo) GetKeyMaterial() []byte {
|
||||
if m != nil {
|
||||
return m.KeyMaterial
|
||||
}
|
||||
return nil
|
||||
}
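As a hedged illustration of how the two messages above fit together (this sketch is not part of the generated file; the certificate and SKI values are placeholders), a signing identity can reference a keystore-resident private key through KeyInfo.KeyIdentifier:

package main

import (
	"fmt"

	"github.com/hyperledger/fabric-protos-go/msp"
)

func main() {
	// The private signer points at a key already in the BCCSP keystore via its SKI;
	// KeyMaterial would instead carry encoded key bytes to be imported.
	sid := &msp.SigningIdentityInfo{
		PublicSigner: []byte("<PEM-encoded signing certificate>"),
		PrivateSigner: &msp.KeyInfo{
			KeyIdentifier: "<SKI of the key in the keystore>",
		},
	}
	fmt.Println(sid.GetPrivateSigner().GetKeyIdentifier())
}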
|
||||
|
||||
// FabricOUIdentifier represents an organizational unit and
|
||||
// its related chain of trust identifier.
|
||||
type FabricOUIdentifier struct {
|
||||
// Certificate represents the second certificate in a certification chain.
|
||||
// (Notice that the first certificate in a certification chain is supposed
|
||||
// to be the certificate of an identity).
|
||||
// It must correspond to the certificate of root or intermediate CA
|
||||
// recognized by the MSP this message belongs to.
|
||||
// Starting from this certificate, a certification chain is computed
|
||||
// and bound to the OrganizationUnitIdentifier specified
|
||||
Certificate []byte `protobuf:"bytes,1,opt,name=certificate,proto3" json:"certificate,omitempty"`
|
||||
// OrganizationUnitIdentifier defines the organizational unit under the
|
||||
// MSP identified with MSPIdentifier
|
||||
OrganizationalUnitIdentifier string `protobuf:"bytes,2,opt,name=organizational_unit_identifier,json=organizationalUnitIdentifier,proto3" json:"organizational_unit_identifier,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *FabricOUIdentifier) Reset() { *m = FabricOUIdentifier{} }
|
||||
func (m *FabricOUIdentifier) String() string { return proto.CompactTextString(m) }
|
||||
func (*FabricOUIdentifier) ProtoMessage() {}
|
||||
func (*FabricOUIdentifier) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_9c34771f529d9d1a, []int{7}
|
||||
}
|
||||
|
||||
func (m *FabricOUIdentifier) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_FabricOUIdentifier.Unmarshal(m, b)
|
||||
}
|
||||
func (m *FabricOUIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_FabricOUIdentifier.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *FabricOUIdentifier) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_FabricOUIdentifier.Merge(m, src)
|
||||
}
|
||||
func (m *FabricOUIdentifier) XXX_Size() int {
|
||||
return xxx_messageInfo_FabricOUIdentifier.Size(m)
|
||||
}
|
||||
func (m *FabricOUIdentifier) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_FabricOUIdentifier.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_FabricOUIdentifier proto.InternalMessageInfo
|
||||
|
||||
func (m *FabricOUIdentifier) GetCertificate() []byte {
|
||||
if m != nil {
|
||||
return m.Certificate
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *FabricOUIdentifier) GetOrganizationalUnitIdentifier() string {
|
||||
if m != nil {
|
||||
return m.OrganizationalUnitIdentifier
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// FabricNodeOUs contains configuration to tell clients, peers, and orderers apart
// based on OUs. If NodeOUs recognition is enabled, then an MSP identity
// that does not contain any of the specified OUs will be considered invalid.
type FabricNodeOUs struct {
// If true, an MSP identity that does not contain any of the specified OUs will be considered invalid.
|
||||
Enable bool `protobuf:"varint,1,opt,name=enable,proto3" json:"enable,omitempty"`
|
||||
// OU Identifier of the clients
|
||||
ClientOuIdentifier *FabricOUIdentifier `protobuf:"bytes,2,opt,name=client_ou_identifier,json=clientOuIdentifier,proto3" json:"client_ou_identifier,omitempty"`
|
||||
// OU Identifier of the peers
|
||||
PeerOuIdentifier *FabricOUIdentifier `protobuf:"bytes,3,opt,name=peer_ou_identifier,json=peerOuIdentifier,proto3" json:"peer_ou_identifier,omitempty"`
|
||||
// OU Identifier of the admins
|
||||
AdminOuIdentifier *FabricOUIdentifier `protobuf:"bytes,4,opt,name=admin_ou_identifier,json=adminOuIdentifier,proto3" json:"admin_ou_identifier,omitempty"`
|
||||
// OU Identifier of the orderers
|
||||
OrdererOuIdentifier *FabricOUIdentifier `protobuf:"bytes,5,opt,name=orderer_ou_identifier,json=ordererOuIdentifier,proto3" json:"orderer_ou_identifier,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *FabricNodeOUs) Reset() { *m = FabricNodeOUs{} }
|
||||
func (m *FabricNodeOUs) String() string { return proto.CompactTextString(m) }
|
||||
func (*FabricNodeOUs) ProtoMessage() {}
|
||||
func (*FabricNodeOUs) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_9c34771f529d9d1a, []int{8}
|
||||
}
|
||||
|
||||
func (m *FabricNodeOUs) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_FabricNodeOUs.Unmarshal(m, b)
|
||||
}
|
||||
func (m *FabricNodeOUs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_FabricNodeOUs.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *FabricNodeOUs) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_FabricNodeOUs.Merge(m, src)
|
||||
}
|
||||
func (m *FabricNodeOUs) XXX_Size() int {
|
||||
return xxx_messageInfo_FabricNodeOUs.Size(m)
|
||||
}
|
||||
func (m *FabricNodeOUs) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_FabricNodeOUs.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_FabricNodeOUs proto.InternalMessageInfo
|
||||
|
||||
func (m *FabricNodeOUs) GetEnable() bool {
|
||||
if m != nil {
|
||||
return m.Enable
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *FabricNodeOUs) GetClientOuIdentifier() *FabricOUIdentifier {
|
||||
if m != nil {
|
||||
return m.ClientOuIdentifier
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *FabricNodeOUs) GetPeerOuIdentifier() *FabricOUIdentifier {
|
||||
if m != nil {
|
||||
return m.PeerOuIdentifier
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *FabricNodeOUs) GetAdminOuIdentifier() *FabricOUIdentifier {
|
||||
if m != nil {
|
||||
return m.AdminOuIdentifier
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *FabricNodeOUs) GetOrdererOuIdentifier() *FabricOUIdentifier {
|
||||
if m != nil {
|
||||
return m.OrdererOuIdentifier
|
||||
}
|
||||
return nil
|
||||
}
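A minimal sketch (not part of the generated code) of how FabricNodeOUs and FabricOUIdentifier might be populated when OU-based classification is enabled; the CA certificate bytes are placeholders, and the OU names follow the conventional client/peer/admin/orderer naming rather than anything mandated by this file:

package main

import (
	"fmt"

	"github.com/hyperledger/fabric-protos-go/msp"
)

func main() {
	caCert := []byte("<certificate of a root or intermediate CA of this MSP>")
	nodeOUs := &msp.FabricNodeOUs{
		Enable:              true,
		ClientOuIdentifier:  &msp.FabricOUIdentifier{Certificate: caCert, OrganizationalUnitIdentifier: "client"},
		PeerOuIdentifier:    &msp.FabricOUIdentifier{Certificate: caCert, OrganizationalUnitIdentifier: "peer"},
		AdminOuIdentifier:   &msp.FabricOUIdentifier{Certificate: caCert, OrganizationalUnitIdentifier: "admin"},
		OrdererOuIdentifier: &msp.FabricOUIdentifier{Certificate: caCert, OrganizationalUnitIdentifier: "orderer"},
	}
	fmt.Println(nodeOUs.GetEnable(), nodeOUs.GetPeerOuIdentifier().GetOrganizationalUnitIdentifier())
}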
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*MSPConfig)(nil), "msp.MSPConfig")
|
||||
proto.RegisterType((*FabricMSPConfig)(nil), "msp.FabricMSPConfig")
|
||||
proto.RegisterType((*FabricCryptoConfig)(nil), "msp.FabricCryptoConfig")
|
||||
proto.RegisterType((*IdemixMSPConfig)(nil), "msp.IdemixMSPConfig")
|
||||
proto.RegisterType((*IdemixMSPSignerConfig)(nil), "msp.IdemixMSPSignerConfig")
|
||||
proto.RegisterType((*SigningIdentityInfo)(nil), "msp.SigningIdentityInfo")
|
||||
proto.RegisterType((*KeyInfo)(nil), "msp.KeyInfo")
|
||||
proto.RegisterType((*FabricOUIdentifier)(nil), "msp.FabricOUIdentifier")
|
||||
proto.RegisterType((*FabricNodeOUs)(nil), "msp.FabricNodeOUs")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("msp/msp_config.proto", fileDescriptor_9c34771f529d9d1a) }
|
||||
|
||||
var fileDescriptor_9c34771f529d9d1a = []byte{
|
||||
// 883 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0xdb, 0x6e, 0x23, 0x45,
|
||||
0x13, 0x96, 0x8f, 0xbb, 0x2e, 0x8f, 0xed, 0x6c, 0xe7, 0xf0, 0x8f, 0x7e, 0xb1, 0xbb, 0x8e, 0x01,
|
||||
0x61, 0x09, 0xc5, 0x91, 0xb2, 0x48, 0x48, 0x88, 0xab, 0x0d, 0x2c, 0x0c, 0x4b, 0x48, 0xd4, 0x51,
|
||||
0x6e, 0xb8, 0x19, 0xb5, 0x67, 0xda, 0xe3, 0x96, 0x67, 0xba, 0x47, 0xdd, 0x3d, 0x2b, 0x8c, 0xb8,
|
||||
0xe6, 0x05, 0x78, 0x07, 0xae, 0x79, 0x13, 0x5e, 0x09, 0xf5, 0x21, 0xf6, 0x38, 0x89, 0x0c, 0x77,
|
||||
0x5d, 0x55, 0x5f, 0x7d, 0x53, 0xfd, 0x55, 0x55, 0x0f, 0x1c, 0x15, 0xaa, 0x3c, 0x2f, 0x54, 0x19,
|
||||
0x27, 0x82, 0x2f, 0x58, 0x36, 0x2b, 0xa5, 0xd0, 0x02, 0xb5, 0x0a, 0x55, 0x4e, 0xbe, 0x84, 0xde,
|
||||
0xd5, 0xed, 0xcd, 0xa5, 0xf5, 0x23, 0x04, 0x6d, 0xbd, 0x2e, 0x69, 0xd8, 0x18, 0x37, 0xa6, 0x1d,
|
||||
0x6c, 0xcf, 0xe8, 0x04, 0xba, 0x2e, 0x2b, 0x6c, 0x8e, 0x1b, 0xd3, 0x00, 0x7b, 0x6b, 0xf2, 0x57,
|
||||
0x1b, 0x46, 0xef, 0xc8, 0x5c, 0xb2, 0x64, 0x27, 0x9f, 0x93, 0xc2, 0xe5, 0xf7, 0xb0, 0x3d, 0xa3,
|
||||
0x97, 0x00, 0x52, 0x08, 0x1d, 0x27, 0x54, 0x6a, 0x15, 0x36, 0xc7, 0xad, 0x69, 0x80, 0x7b, 0xc6,
|
||||
0x73, 0x69, 0x1c, 0xe8, 0x0c, 0x10, 0xe3, 0x9a, 0xca, 0x82, 0xa6, 0x8c, 0x68, 0xea, 0x61, 0x2d,
|
||||
0x0b, 0x7b, 0x51, 0x8f, 0x38, 0xf8, 0x09, 0x74, 0x49, 0x5a, 0x30, 0xae, 0xc2, 0xb6, 0x85, 0x78,
|
||||
0x0b, 0x7d, 0x06, 0x23, 0x49, 0x3f, 0x88, 0x84, 0x68, 0x26, 0x78, 0x9c, 0x33, 0xa5, 0xc3, 0x8e,
|
||||
0x05, 0x0c, 0xb7, 0xee, 0x1f, 0x99, 0xd2, 0xe8, 0x12, 0x0e, 0x14, 0xcb, 0x38, 0xe3, 0x59, 0xcc,
|
||||
0x52, 0xca, 0x35, 0xd3, 0xeb, 0xb0, 0x3b, 0x6e, 0x4c, 0xfb, 0x17, 0xe1, 0xac, 0x50, 0xe5, 0xec,
|
||||
0xd6, 0x05, 0x23, 0x1f, 0x8b, 0xf8, 0x42, 0xe0, 0x91, 0xda, 0x75, 0xa2, 0x18, 0x5e, 0x0b, 0x99,
|
||||
0x11, 0xce, 0x7e, 0xb5, 0xc4, 0x24, 0x8f, 0x2b, 0xce, 0xb4, 0x27, 0x5c, 0x30, 0x2a, 0x55, 0xf8,
|
||||
0x6c, 0xdc, 0x9a, 0xf6, 0x2f, 0xfe, 0x67, 0x39, 0x9d, 0x4c, 0xd7, 0x77, 0xd1, 0x26, 0x8e, 0x5f,
|
||||
0xee, 0xe6, 0xdf, 0x71, 0xa6, 0xb7, 0x51, 0x85, 0xbe, 0x86, 0x41, 0x22, 0xd7, 0xa5, 0x16, 0xbe,
|
||||
0x63, 0xe1, 0x73, 0x5b, 0x62, 0x9d, 0xee, 0xd2, 0xc6, 0x9d, 0xf0, 0x38, 0x48, 0x6a, 0x16, 0xfa,
|
||||
0x04, 0x86, 0x3a, 0x57, 0x71, 0x4d, 0xf6, 0x9e, 0xd5, 0x22, 0xd0, 0xb9, 0xc2, 0x1b, 0xe5, 0xbf,
|
||||
0x80, 0x13, 0x83, 0x7a, 0x42, 0x7d, 0xb0, 0xe8, 0x23, 0x9d, 0xab, 0xe8, 0x51, 0x03, 0xbe, 0x82,
|
||||
0xd1, 0xc2, 0x7e, 0x3f, 0xe6, 0x22, 0xa5, 0xb1, 0xa8, 0x54, 0xd8, 0xb7, 0xb5, 0xa1, 0x5a, 0x6d,
|
||||
0x3f, 0x89, 0x94, 0x5e, 0xdf, 0x29, 0x3c, 0x58, 0x6c, 0xcd, 0x4a, 0x4d, 0xfe, 0x68, 0x00, 0x7a,
|
||||
0x5c, 0x3c, 0xba, 0x80, 0x63, 0x23, 0x30, 0xd1, 0x95, 0xa4, 0xf1, 0x92, 0xa8, 0x65, 0xbc, 0x20,
|
||||
0x05, 0xcb, 0xd7, 0x7e, 0x8c, 0x0e, 0x37, 0xc1, 0xef, 0x89, 0x5a, 0xbe, 0xb3, 0x21, 0x14, 0xc1,
|
||||
0xe9, 0x7d, 0xfb, 0x6a, 0xb2, 0xfb, 0xec, 0x8a, 0x27, 0x46, 0x56, 0x3b, 0xb0, 0x3d, 0xfc, 0xea,
|
||||
0x1e, 0xb8, 0x15, 0xd8, 0x12, 0x79, 0xd4, 0xe4, 0xcf, 0x06, 0x8c, 0xa2, 0x94, 0x16, 0xec, 0x97,
|
||||
0xfd, 0x83, 0x7c, 0x00, 0x2d, 0x56, 0xae, 0xfc, 0x16, 0x98, 0x23, 0xba, 0x80, 0xae, 0xa9, 0x8d,
|
||||
0xca, 0xb0, 0x65, 0x25, 0xf8, 0xbf, 0x95, 0x60, 0xc3, 0x75, 0x6b, 0x63, 0xbe, 0x43, 0x1e, 0x89,
|
||||
0x3e, 0x86, 0x41, 0x6d, 0x50, 0xcb, 0x55, 0xd8, 0xb6, 0x7c, 0xc1, 0xd6, 0x79, 0xb3, 0x42, 0x47,
|
||||
0xd0, 0xa1, 0xa5, 0x48, 0x96, 0x61, 0x67, 0xdc, 0x98, 0xb6, 0xb0, 0x33, 0x26, 0xbf, 0x37, 0xe1,
|
||||
0xf8, 0x49, 0x72, 0x53, 0x6e, 0x22, 0x69, 0x6a, 0xcb, 0x0d, 0xb0, 0x3d, 0xa3, 0x21, 0x34, 0xd5,
|
||||
0x7d, 0xb5, 0x4d, 0xb5, 0x42, 0xdf, 0xc0, 0xab, 0xfd, 0x33, 0x6b, 0x2f, 0xd1, 0xc3, 0x1f, 0xed,
|
||||
0x9b, 0x4c, 0xf3, 0x25, 0x29, 0x72, 0x6a, 0xab, 0xee, 0x60, 0x7b, 0x36, 0x57, 0xa2, 0x5c, 0x8a,
|
||||
0x3c, 0x2f, 0x28, 0x37, 0x84, 0xb6, 0xea, 0x1e, 0x0e, 0xb6, 0xce, 0x28, 0x45, 0x3f, 0xc0, 0xa9,
|
||||
0x29, 0xcb, 0x10, 0x91, 0x3c, 0xae, 0x49, 0xc0, 0xf8, 0x42, 0xc8, 0xc2, 0x9e, 0xed, 0x22, 0x06,
|
||||
0xf8, 0xf5, 0x16, 0x88, 0x37, 0xb8, 0x68, 0x0b, 0x9b, 0x08, 0x38, 0x7c, 0x62, 0x4d, 0x4d, 0x1d,
|
||||
0x65, 0x35, 0xcf, 0x59, 0x12, 0xfb, 0xae, 0x38, 0x39, 0x02, 0xe7, 0x74, 0x82, 0xa1, 0x37, 0x30,
|
||||
0x2c, 0x25, 0xfb, 0x60, 0x86, 0xdd, 0xa3, 0x9a, 0xb6, 0x77, 0x81, 0xed, 0xdd, 0x7b, 0xea, 0x36,
|
||||
0x7e, 0xe0, 0x31, 0x2e, 0x69, 0x72, 0x0b, 0xcf, 0x7c, 0x04, 0x7d, 0x0a, 0xc3, 0x15, 0xad, 0xcf,
|
||||
0x9c, 0x9f, 0x91, 0xc1, 0x8a, 0xd6, 0x06, 0x0c, 0x9d, 0x42, 0x60, 0x60, 0x05, 0xd1, 0x54, 0x32,
|
||||
0x92, 0xfb, 0x3e, 0xf4, 0x57, 0x74, 0x7d, 0xe5, 0x5d, 0x93, 0xdf, 0xee, 0x97, 0xa1, 0xfe, 0x30,
|
||||
0xa0, 0x31, 0xf4, 0xcd, 0x12, 0xb2, 0x05, 0x4b, 0x88, 0xa6, 0xfe, 0x0a, 0x75, 0xd7, 0x7f, 0x68,
|
||||
0x64, 0xf3, 0xdf, 0x1b, 0x39, 0xf9, 0xbb, 0x09, 0x83, 0x9d, 0x65, 0x35, 0x4f, 0x2b, 0xe5, 0x64,
|
||||
0x9e, 0xbb, 0x8f, 0x3e, 0xc7, 0xde, 0x42, 0x11, 0x1c, 0x25, 0x39, 0x33, 0xad, 0x15, 0xd5, 0xc3,
|
||||
0xaf, 0xec, 0x79, 0xe1, 0x90, 0x4b, 0xba, 0xae, 0x6a, 0x97, 0xfb, 0x16, 0x50, 0x49, 0xa9, 0x7c,
|
||||
0x40, 0xd4, 0xda, 0x4f, 0x74, 0x60, 0x52, 0x76, 0x68, 0xbe, 0x83, 0x43, 0xfb, 0xec, 0x3f, 0xe0,
|
||||
0x69, 0xef, 0xe7, 0x79, 0x61, 0x73, 0x76, 0x88, 0xde, 0xc3, 0xb1, 0x90, 0x29, 0x95, 0x8f, 0x4a,
|
||||
0xea, 0xec, 0xa7, 0x3a, 0xf4, 0x59, 0x75, 0xb2, 0xb7, 0x73, 0x38, 0x15, 0x32, 0x9b, 0x2d, 0xd7,
|
||||
0x25, 0x95, 0x39, 0x4d, 0x33, 0x2a, 0x67, 0xee, 0xf9, 0x73, 0xbf, 0x5b, 0x65, 0xc8, 0xde, 0x1e,
|
||||
0x5c, 0xa9, 0xd2, 0x2d, 0xed, 0x0d, 0x49, 0x56, 0x24, 0xa3, 0x3f, 0x7f, 0x9e, 0x31, 0xbd, 0xac,
|
||||
0xe6, 0xb3, 0x44, 0x14, 0xe7, 0xb5, 0xdc, 0x73, 0x97, 0x7b, 0xe6, 0x72, 0xcf, 0x32, 0x61, 0xfe,
|
||||
0xdf, 0xf3, 0xae, 0x35, 0xdf, 0xfc, 0x13, 0x00, 0x00, 0xff, 0xff, 0x42, 0x15, 0x49, 0x47, 0xd1,
|
||||
0x07, 0x00, 0x00,
|
||||
}
451 chaincode/vendor/github.com/hyperledger/fabric-protos-go/msp/msp_principal.pb.go (generated, vendored)
@ -0,0 +1,451 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: msp/msp_principal.proto
|
||||
|
||||
package msp
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
type MSPPrincipal_Classification int32
|
||||
|
||||
const (
|
||||
MSPPrincipal_ROLE MSPPrincipal_Classification = 0
|
||||
// either a member of an MSP network or an
// administrator of an MSP network
|
||||
MSPPrincipal_ORGANIZATION_UNIT MSPPrincipal_Classification = 1
|
||||
// grouping of entities, per MSP affiliation
|
||||
// E.g., this can well be represented by an MSP's
|
||||
// Organization unit
|
||||
MSPPrincipal_IDENTITY MSPPrincipal_Classification = 2
|
||||
// identity
|
||||
MSPPrincipal_ANONYMITY MSPPrincipal_Classification = 3
|
||||
// an identity to be anonymous or nominal.
|
||||
MSPPrincipal_COMBINED MSPPrincipal_Classification = 4
|
||||
)
|
||||
|
||||
var MSPPrincipal_Classification_name = map[int32]string{
|
||||
0: "ROLE",
|
||||
1: "ORGANIZATION_UNIT",
|
||||
2: "IDENTITY",
|
||||
3: "ANONYMITY",
|
||||
4: "COMBINED",
|
||||
}
|
||||
|
||||
var MSPPrincipal_Classification_value = map[string]int32{
|
||||
"ROLE": 0,
|
||||
"ORGANIZATION_UNIT": 1,
|
||||
"IDENTITY": 2,
|
||||
"ANONYMITY": 3,
|
||||
"COMBINED": 4,
|
||||
}
|
||||
|
||||
func (x MSPPrincipal_Classification) String() string {
|
||||
return proto.EnumName(MSPPrincipal_Classification_name, int32(x))
|
||||
}
|
||||
|
||||
func (MSPPrincipal_Classification) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_82e08b7ead29bd48, []int{0, 0}
|
||||
}
|
||||
|
||||
type MSPRole_MSPRoleType int32
|
||||
|
||||
const (
|
||||
MSPRole_MEMBER MSPRole_MSPRoleType = 0
|
||||
MSPRole_ADMIN MSPRole_MSPRoleType = 1
|
||||
MSPRole_CLIENT MSPRole_MSPRoleType = 2
|
||||
MSPRole_PEER MSPRole_MSPRoleType = 3
|
||||
MSPRole_ORDERER MSPRole_MSPRoleType = 4
|
||||
)
|
||||
|
||||
var MSPRole_MSPRoleType_name = map[int32]string{
|
||||
0: "MEMBER",
|
||||
1: "ADMIN",
|
||||
2: "CLIENT",
|
||||
3: "PEER",
|
||||
4: "ORDERER",
|
||||
}
|
||||
|
||||
var MSPRole_MSPRoleType_value = map[string]int32{
|
||||
"MEMBER": 0,
|
||||
"ADMIN": 1,
|
||||
"CLIENT": 2,
|
||||
"PEER": 3,
|
||||
"ORDERER": 4,
|
||||
}
|
||||
|
||||
func (x MSPRole_MSPRoleType) String() string {
|
||||
return proto.EnumName(MSPRole_MSPRoleType_name, int32(x))
|
||||
}
|
||||
|
||||
func (MSPRole_MSPRoleType) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_82e08b7ead29bd48, []int{2, 0}
|
||||
}
|
||||
|
||||
type MSPIdentityAnonymity_MSPIdentityAnonymityType int32
|
||||
|
||||
const (
|
||||
MSPIdentityAnonymity_NOMINAL MSPIdentityAnonymity_MSPIdentityAnonymityType = 0
|
||||
MSPIdentityAnonymity_ANONYMOUS MSPIdentityAnonymity_MSPIdentityAnonymityType = 1
|
||||
)
|
||||
|
||||
var MSPIdentityAnonymity_MSPIdentityAnonymityType_name = map[int32]string{
|
||||
0: "NOMINAL",
|
||||
1: "ANONYMOUS",
|
||||
}
|
||||
|
||||
var MSPIdentityAnonymity_MSPIdentityAnonymityType_value = map[string]int32{
|
||||
"NOMINAL": 0,
|
||||
"ANONYMOUS": 1,
|
||||
}
|
||||
|
||||
func (x MSPIdentityAnonymity_MSPIdentityAnonymityType) String() string {
|
||||
return proto.EnumName(MSPIdentityAnonymity_MSPIdentityAnonymityType_name, int32(x))
|
||||
}
|
||||
|
||||
func (MSPIdentityAnonymity_MSPIdentityAnonymityType) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_82e08b7ead29bd48, []int{3, 0}
|
||||
}
|
||||
|
||||
// MSPPrincipal aims to represent an MSP-centric set of identities.
// In particular, this structure allows for the definition of
// - a group of identities that are members of the same MSP
// - a group of identities that are members of the same organization unit
// in the same MSP
// - a group of identities that are administering a specific MSP
// - a specific identity
// Expressing these groups is done via the two fields below:
// - Classification, that defines the type of classification of identities
// in an MSP this principal would be defined on; Classification can take
// three values:
// (i) ByMSPRole: that represents a classification of identities within
// an MSP based on one of the two pre-defined MSP roles, "member" and "admin"
// (ii) ByOrganizationUnit: that represents a classification of identities
// within an MSP based on the organization unit an identity belongs to
// (iii) ByIdentity: that denotes that MSPPrincipal is mapped to a single
// identity/certificate; this would mean that the Principal bytes
// message
|
||||
type MSPPrincipal struct {
|
||||
// Classification describes the way that one should process
// Principal. A Classification value of "ByOrganizationUnit" reflects
// that "Principal" contains the name of an organization this MSP
// handles. A Classification value of "ByIdentity" means that
// "Principal" contains a specific identity. The default value
// denotes that Principal contains one of the groups supported
// by default by all MSPs ("admin" or "member").
|
||||
PrincipalClassification MSPPrincipal_Classification `protobuf:"varint,1,opt,name=principal_classification,json=principalClassification,proto3,enum=common.MSPPrincipal_Classification" json:"principal_classification,omitempty"`
|
||||
// Principal completes the policy principal definition. For the default
|
||||
// principal types, Principal can be either "Admin" or "Member".
|
||||
// For the ByOrganizationUnit/ByIdentity values of Classification,
|
||||
// PolicyPrincipal acquires its value from an organization unit or
|
||||
// identity, respectively.
|
||||
// For the Combined Classification type, the Principal is a marshalled
|
||||
// CombinedPrincipal.
|
||||
Principal []byte `protobuf:"bytes,2,opt,name=principal,proto3" json:"principal,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *MSPPrincipal) Reset() { *m = MSPPrincipal{} }
|
||||
func (m *MSPPrincipal) String() string { return proto.CompactTextString(m) }
|
||||
func (*MSPPrincipal) ProtoMessage() {}
|
||||
func (*MSPPrincipal) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_82e08b7ead29bd48, []int{0}
|
||||
}
|
||||
|
||||
func (m *MSPPrincipal) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_MSPPrincipal.Unmarshal(m, b)
|
||||
}
|
||||
func (m *MSPPrincipal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_MSPPrincipal.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *MSPPrincipal) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_MSPPrincipal.Merge(m, src)
|
||||
}
|
||||
func (m *MSPPrincipal) XXX_Size() int {
|
||||
return xxx_messageInfo_MSPPrincipal.Size(m)
|
||||
}
|
||||
func (m *MSPPrincipal) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_MSPPrincipal.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_MSPPrincipal proto.InternalMessageInfo
|
||||
|
||||
func (m *MSPPrincipal) GetPrincipalClassification() MSPPrincipal_Classification {
|
||||
if m != nil {
|
||||
return m.PrincipalClassification
|
||||
}
|
||||
return MSPPrincipal_ROLE
|
||||
}
|
||||
|
||||
func (m *MSPPrincipal) GetPrincipal() []byte {
|
||||
if m != nil {
|
||||
return m.Principal
|
||||
}
|
||||
return nil
|
||||
}
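As a hedged sketch of the pattern described above (not part of the generated file), a ROLE-classified principal in Fabric's policy helpers typically carries a marshalled MSPRole in its Principal bytes; the MSP identifier here is a placeholder and the pattern is illustrative rather than normative:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/hyperledger/fabric-protos-go/msp"
)

func main() {
	// Serialize the role first, then wrap it in an MSPPrincipal of ROLE classification.
	roleBytes, err := proto.Marshal(&msp.MSPRole{
		MspIdentifier: "SampleOrgMSP", // placeholder MSP ID
		Role:          msp.MSPRole_MEMBER,
	})
	if err != nil {
		panic(err)
	}
	principal := &msp.MSPPrincipal{
		PrincipalClassification: msp.MSPPrincipal_ROLE,
		Principal:               roleBytes,
	}
	fmt.Println(principal.GetPrincipalClassification())
}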
|
||||
|
||||
// OrganizationUnit governs the organization of the Principal
// field of a policy principal when a specific organization unit's
// members are to be defined within a policy principal.
|
||||
type OrganizationUnit struct {
|
||||
// MSPIdentifier represents the identifier of the MSP this organization unit
|
||||
// refers to
|
||||
MspIdentifier string `protobuf:"bytes,1,opt,name=msp_identifier,json=mspIdentifier,proto3" json:"msp_identifier,omitempty"`
|
||||
// OrganizationUnitIdentifier defines the organizational unit under the
|
||||
// MSP identified with MSPIdentifier
|
||||
OrganizationalUnitIdentifier string `protobuf:"bytes,2,opt,name=organizational_unit_identifier,json=organizationalUnitIdentifier,proto3" json:"organizational_unit_identifier,omitempty"`
|
||||
// CertifiersIdentifier is the hash of certificates chain of trust
|
||||
// related to this organizational unit
|
||||
CertifiersIdentifier []byte `protobuf:"bytes,3,opt,name=certifiers_identifier,json=certifiersIdentifier,proto3" json:"certifiers_identifier,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *OrganizationUnit) Reset() { *m = OrganizationUnit{} }
|
||||
func (m *OrganizationUnit) String() string { return proto.CompactTextString(m) }
|
||||
func (*OrganizationUnit) ProtoMessage() {}
|
||||
func (*OrganizationUnit) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_82e08b7ead29bd48, []int{1}
|
||||
}
|
||||
|
||||
func (m *OrganizationUnit) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_OrganizationUnit.Unmarshal(m, b)
|
||||
}
|
||||
func (m *OrganizationUnit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_OrganizationUnit.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *OrganizationUnit) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_OrganizationUnit.Merge(m, src)
|
||||
}
|
||||
func (m *OrganizationUnit) XXX_Size() int {
|
||||
return xxx_messageInfo_OrganizationUnit.Size(m)
|
||||
}
|
||||
func (m *OrganizationUnit) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_OrganizationUnit.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_OrganizationUnit proto.InternalMessageInfo
|
||||
|
||||
func (m *OrganizationUnit) GetMspIdentifier() string {
|
||||
if m != nil {
|
||||
return m.MspIdentifier
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *OrganizationUnit) GetOrganizationalUnitIdentifier() string {
|
||||
if m != nil {
|
||||
return m.OrganizationalUnitIdentifier
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *OrganizationUnit) GetCertifiersIdentifier() []byte {
|
||||
if m != nil {
|
||||
return m.CertifiersIdentifier
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MSPRole governs the organization of the Principal
|
||||
// field of an MSPPrincipal when it aims to define one of the
|
||||
// two dedicated roles within an MSP: Admin and Members.
|
||||
type MSPRole struct {
|
||||
// MSPIdentifier represents the identifier of the MSP this principal
|
||||
// refers to
|
||||
MspIdentifier string `protobuf:"bytes,1,opt,name=msp_identifier,json=mspIdentifier,proto3" json:"msp_identifier,omitempty"`
|
||||
// MSPRoleType defines which of the available, pre-defined MSP roles
// an identity should possess inside the MSP with identifier MSPIdentifier
|
||||
Role MSPRole_MSPRoleType `protobuf:"varint,2,opt,name=role,proto3,enum=common.MSPRole_MSPRoleType" json:"role,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *MSPRole) Reset() { *m = MSPRole{} }
|
||||
func (m *MSPRole) String() string { return proto.CompactTextString(m) }
|
||||
func (*MSPRole) ProtoMessage() {}
|
||||
func (*MSPRole) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_82e08b7ead29bd48, []int{2}
|
||||
}
|
||||
|
||||
func (m *MSPRole) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_MSPRole.Unmarshal(m, b)
|
||||
}
|
||||
func (m *MSPRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_MSPRole.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *MSPRole) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_MSPRole.Merge(m, src)
|
||||
}
|
||||
func (m *MSPRole) XXX_Size() int {
|
||||
return xxx_messageInfo_MSPRole.Size(m)
|
||||
}
|
||||
func (m *MSPRole) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_MSPRole.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_MSPRole proto.InternalMessageInfo
|
||||
|
||||
func (m *MSPRole) GetMspIdentifier() string {
|
||||
if m != nil {
|
||||
return m.MspIdentifier
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *MSPRole) GetRole() MSPRole_MSPRoleType {
|
||||
if m != nil {
|
||||
return m.Role
|
||||
}
|
||||
return MSPRole_MEMBER
|
||||
}
|
||||
|
||||
// MSPIdentityAnonymity can be used to enforce an identity to be anonymous or nominal.
|
||||
type MSPIdentityAnonymity struct {
|
||||
AnonymityType MSPIdentityAnonymity_MSPIdentityAnonymityType `protobuf:"varint,1,opt,name=anonymity_type,json=anonymityType,proto3,enum=common.MSPIdentityAnonymity_MSPIdentityAnonymityType" json:"anonymity_type,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *MSPIdentityAnonymity) Reset() { *m = MSPIdentityAnonymity{} }
|
||||
func (m *MSPIdentityAnonymity) String() string { return proto.CompactTextString(m) }
|
||||
func (*MSPIdentityAnonymity) ProtoMessage() {}
|
||||
func (*MSPIdentityAnonymity) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_82e08b7ead29bd48, []int{3}
|
||||
}
|
||||
|
||||
func (m *MSPIdentityAnonymity) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_MSPIdentityAnonymity.Unmarshal(m, b)
|
||||
}
|
||||
func (m *MSPIdentityAnonymity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_MSPIdentityAnonymity.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *MSPIdentityAnonymity) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_MSPIdentityAnonymity.Merge(m, src)
|
||||
}
|
||||
func (m *MSPIdentityAnonymity) XXX_Size() int {
|
||||
return xxx_messageInfo_MSPIdentityAnonymity.Size(m)
|
||||
}
|
||||
func (m *MSPIdentityAnonymity) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_MSPIdentityAnonymity.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_MSPIdentityAnonymity proto.InternalMessageInfo
|
||||
|
||||
func (m *MSPIdentityAnonymity) GetAnonymityType() MSPIdentityAnonymity_MSPIdentityAnonymityType {
|
||||
if m != nil {
|
||||
return m.AnonymityType
|
||||
}
|
||||
return MSPIdentityAnonymity_NOMINAL
|
||||
}
|
||||
|
||||
// CombinedPrincipal governs the organization of the Principal
|
||||
// field of a policy principal when principal_classification has
|
||||
// indicated that a combined form of principals is required
|
||||
type CombinedPrincipal struct {
|
||||
// Principals refer to combined principals
|
||||
Principals []*MSPPrincipal `protobuf:"bytes,1,rep,name=principals,proto3" json:"principals,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *CombinedPrincipal) Reset() { *m = CombinedPrincipal{} }
|
||||
func (m *CombinedPrincipal) String() string { return proto.CompactTextString(m) }
|
||||
func (*CombinedPrincipal) ProtoMessage() {}
|
||||
func (*CombinedPrincipal) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_82e08b7ead29bd48, []int{4}
|
||||
}
|
||||
|
||||
func (m *CombinedPrincipal) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_CombinedPrincipal.Unmarshal(m, b)
|
||||
}
|
||||
func (m *CombinedPrincipal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_CombinedPrincipal.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *CombinedPrincipal) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_CombinedPrincipal.Merge(m, src)
|
||||
}
|
||||
func (m *CombinedPrincipal) XXX_Size() int {
|
||||
return xxx_messageInfo_CombinedPrincipal.Size(m)
|
||||
}
|
||||
func (m *CombinedPrincipal) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_CombinedPrincipal.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_CombinedPrincipal proto.InternalMessageInfo
|
||||
|
||||
func (m *CombinedPrincipal) GetPrincipals() []*MSPPrincipal {
|
||||
if m != nil {
|
||||
return m.Principals
|
||||
}
|
||||
return nil
|
||||
}
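A short, hedged sketch (not part of the generated code) of combining two already-built principals into a CombinedPrincipal, which would itself be marshalled into the Principal field of a COMBINED-classified MSPPrincipal; the helper name is hypothetical:

package example

import (
	"github.com/golang/protobuf/proto"
	"github.com/hyperledger/fabric-protos-go/msp"
)

// combinedPrincipal is a hypothetical helper: it wraps two principals into a
// COMBINED MSPPrincipal by marshalling a CombinedPrincipal into the Principal bytes.
func combinedPrincipal(a, b *msp.MSPPrincipal) (*msp.MSPPrincipal, error) {
	combinedBytes, err := proto.Marshal(&msp.CombinedPrincipal{Principals: []*msp.MSPPrincipal{a, b}})
	if err != nil {
		return nil, err
	}
	return &msp.MSPPrincipal{
		PrincipalClassification: msp.MSPPrincipal_COMBINED,
		Principal:               combinedBytes,
	}, nil
}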
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("common.MSPPrincipal_Classification", MSPPrincipal_Classification_name, MSPPrincipal_Classification_value)
|
||||
proto.RegisterEnum("common.MSPRole_MSPRoleType", MSPRole_MSPRoleType_name, MSPRole_MSPRoleType_value)
|
||||
proto.RegisterEnum("common.MSPIdentityAnonymity_MSPIdentityAnonymityType", MSPIdentityAnonymity_MSPIdentityAnonymityType_name, MSPIdentityAnonymity_MSPIdentityAnonymityType_value)
|
||||
proto.RegisterType((*MSPPrincipal)(nil), "common.MSPPrincipal")
|
||||
proto.RegisterType((*OrganizationUnit)(nil), "common.OrganizationUnit")
|
||||
proto.RegisterType((*MSPRole)(nil), "common.MSPRole")
|
||||
proto.RegisterType((*MSPIdentityAnonymity)(nil), "common.MSPIdentityAnonymity")
|
||||
proto.RegisterType((*CombinedPrincipal)(nil), "common.CombinedPrincipal")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("msp/msp_principal.proto", fileDescriptor_82e08b7ead29bd48) }
|
||||
|
||||
var fileDescriptor_82e08b7ead29bd48 = []byte{
|
||||
// 528 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x5f, 0x6b, 0xdb, 0x3e,
|
||||
0x14, 0xad, 0x93, 0xfc, 0xda, 0xe6, 0xe6, 0x0f, 0xaa, 0x48, 0x69, 0xe0, 0x57, 0x46, 0xf0, 0x36,
|
||||
0x08, 0x8c, 0x3a, 0x90, 0x6e, 0x7b, 0x77, 0x12, 0x53, 0x0c, 0xb1, 0x1c, 0x14, 0xe7, 0xa1, 0xa5,
|
||||
0x2c, 0x38, 0x8e, 0x92, 0x0a, 0x6c, 0xcb, 0xd8, 0xee, 0x83, 0xf7, 0x91, 0xc6, 0x1e, 0xf7, 0xa9,
|
||||
0xf6, 0x29, 0x86, 0xed, 0x26, 0x51, 0xb6, 0x0e, 0xf6, 0x64, 0xce, 0xbd, 0xe7, 0x1c, 0x1f, 0x49,
|
||||
0xf7, 0xc2, 0x55, 0x90, 0x44, 0x83, 0x20, 0x89, 0x96, 0x51, 0xcc, 0x43, 0x8f, 0x47, 0xae, 0xaf,
|
||||
0x45, 0xb1, 0x48, 0x05, 0x3e, 0xf5, 0x44, 0x10, 0x88, 0x50, 0xfd, 0xa9, 0x40, 0xd3, 0x9a, 0xcf,
|
||||
0x66, 0xbb, 0x36, 0xfe, 0x02, 0xdd, 0x3d, 0x77, 0xe9, 0xf9, 0x6e, 0x92, 0xf0, 0x0d, 0xf7, 0xdc,
|
||||
0x94, 0x8b, 0xb0, 0xab, 0xf4, 0x94, 0x7e, 0x7b, 0xf8, 0x56, 0x2b, 0xb5, 0x9a, 0xac, 0xd3, 0xc6,
|
||||
0x47, 0x54, 0x7a, 0xb5, 0x37, 0x39, 0x6e, 0xe0, 0x6b, 0xa8, 0xef, 0x5b, 0xdd, 0x4a, 0x4f, 0xe9,
|
||||
0x37, 0xe9, 0xa1, 0xa0, 0x3e, 0x42, 0xfb, 0x37, 0xfe, 0x39, 0xd4, 0xa8, 0x3d, 0x35, 0xd0, 0x09,
|
||||
0xbe, 0x84, 0x0b, 0x9b, 0xde, 0xe9, 0xc4, 0x7c, 0xd0, 0x1d, 0xd3, 0x26, 0xcb, 0x05, 0x31, 0x1d,
|
||||
0xa4, 0xe0, 0x26, 0x9c, 0x9b, 0x13, 0x83, 0x38, 0xa6, 0x73, 0x8f, 0x2a, 0xb8, 0x05, 0x75, 0x9d,
|
||||
0xd8, 0xe4, 0xde, 0xca, 0x61, 0x35, 0x6f, 0x8e, 0x6d, 0x6b, 0x64, 0x12, 0x63, 0x82, 0x6a, 0xea,
|
||||
0x0f, 0x05, 0x90, 0x1d, 0x6f, 0xdd, 0x90, 0x7f, 0x2d, 0xcc, 0x17, 0x21, 0x4f, 0xf1, 0x7b, 0x68,
|
||||
0xe7, 0x17, 0xc4, 0xd7, 0x2c, 0x4c, 0xf9, 0x86, 0xb3, 0xb8, 0x38, 0x66, 0x9d, 0xb6, 0x82, 0x24,
|
||||
0x32, 0xf7, 0x45, 0x3c, 0x81, 0x37, 0x42, 0x92, 0xba, 0xfe, 0xf2, 0x39, 0xe4, 0xa9, 0x2c, 0xab,
|
||||
0x14, 0xb2, 0xeb, 0x63, 0x56, 0xfe, 0x0b, 0xc9, 0xe5, 0x16, 0x2e, 0x3d, 0x16, 0x97, 0x20, 0x91,
|
||||
0xc5, 0xd5, 0xe2, 0x26, 0x3a, 0x87, 0xe6, 0x41, 0xa4, 0x7e, 0x53, 0xe0, 0xcc, 0x9a, 0xcf, 0xa8,
|
||||
0xf0, 0xd9, 0xbf, 0xa6, 0x1d, 0x40, 0x2d, 0x16, 0x3e, 0x2b, 0x32, 0xb5, 0x87, 0xff, 0x4b, 0x2f,
|
||||
0x96, 0xbb, 0xec, 0xbe, 0x4e, 0x16, 0x31, 0x5a, 0x10, 0xd5, 0x3b, 0x68, 0x48, 0x45, 0x0c, 0x70,
|
||||
0x6a, 0x19, 0xd6, 0xc8, 0xa0, 0xe8, 0x04, 0xd7, 0xe1, 0x3f, 0x7d, 0x62, 0x99, 0x04, 0x29, 0x79,
|
||||
0x79, 0x3c, 0x35, 0x0d, 0xe2, 0xa0, 0x4a, 0xfe, 0x30, 0x33, 0xc3, 0xa0, 0xa8, 0x8a, 0x1b, 0x70,
|
||||
0x66, 0xd3, 0x89, 0x41, 0x0d, 0x8a, 0x6a, 0xea, 0x77, 0x05, 0x3a, 0xd6, 0x7c, 0x56, 0x66, 0x49,
|
||||
0x33, 0x3d, 0x14, 0x61, 0x16, 0xf0, 0x34, 0xc3, 0x8f, 0xd0, 0x76, 0x77, 0x60, 0x99, 0x66, 0x11,
|
||||
0x7b, 0x19, 0xa7, 0x4f, 0x52, 0xb8, 0x3f, 0x54, 0xaf, 0x16, 0x8b, 0xd8, 0x2d, 0x57, 0x86, 0xea,
|
||||
0x67, 0xe8, 0xfe, 0x8d, 0x9a, 0xe7, 0x23, 0xb6, 0x65, 0x12, 0x7d, 0x8a, 0x4e, 0x0e, 0x03, 0x62,
|
||||
0x2f, 0xe6, 0x48, 0x51, 0x4d, 0xb8, 0x18, 0x8b, 0x60, 0xc5, 0x43, 0xb6, 0x3e, 0xec, 0xc0, 0x47,
|
||||
0x80, 0xfd, 0x48, 0x26, 0x5d, 0xa5, 0x57, 0xed, 0x37, 0x86, 0x9d, 0xd7, 0xa6, 0x9e, 0x4a, 0xbc,
|
||||
0xd1, 0x1c, 0xde, 0x89, 0x78, 0xab, 0x3d, 0x65, 0x11, 0x8b, 0x7d, 0xb6, 0xde, 0xb2, 0x58, 0xdb,
|
||||
0xb8, 0xab, 0x98, 0x7b, 0xe5, 0xca, 0x25, 0x2f, 0x06, 0x0f, 0x1f, 0xb6, 0x3c, 0x7d, 0x7a, 0x5e,
|
||||
0xe5, 0x70, 0x20, 0x91, 0x07, 0x25, 0xf9, 0xa6, 0x24, 0xdf, 0x6c, 0x45, 0xbe, 0xb7, 0xab, 0xd3,
|
||||
0x02, 0xde, 0xfe, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x2b, 0x94, 0x15, 0xa2, 0xc9, 0x03, 0x00, 0x00,
|
||||
}
599 chaincode/vendor/github.com/hyperledger/fabric-protos-go/peer/chaincode.pb.go (generated, vendored)
@ -0,0 +1,599 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: peer/chaincode.proto
|
||||
|
||||
package peer
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
common "github.com/hyperledger/fabric-protos-go/common"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
type ChaincodeSpec_Type int32
|
||||
|
||||
const (
|
||||
ChaincodeSpec_UNDEFINED ChaincodeSpec_Type = 0
|
||||
ChaincodeSpec_GOLANG ChaincodeSpec_Type = 1
|
||||
ChaincodeSpec_NODE ChaincodeSpec_Type = 2
|
||||
ChaincodeSpec_CAR ChaincodeSpec_Type = 3
|
||||
ChaincodeSpec_JAVA ChaincodeSpec_Type = 4
|
||||
)
|
||||
|
||||
var ChaincodeSpec_Type_name = map[int32]string{
|
||||
0: "UNDEFINED",
|
||||
1: "GOLANG",
|
||||
2: "NODE",
|
||||
3: "CAR",
|
||||
4: "JAVA",
|
||||
}
|
||||
|
||||
var ChaincodeSpec_Type_value = map[string]int32{
|
||||
"UNDEFINED": 0,
|
||||
"GOLANG": 1,
|
||||
"NODE": 2,
|
||||
"CAR": 3,
|
||||
"JAVA": 4,
|
||||
}
|
||||
|
||||
func (x ChaincodeSpec_Type) String() string {
|
||||
return proto.EnumName(ChaincodeSpec_Type_name, int32(x))
|
||||
}
|
||||
|
||||
func (ChaincodeSpec_Type) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_202814c635ff5fee, []int{2, 0}
|
||||
}
|
||||
|
||||
// ChaincodeID contains the path as specified by the deploy transaction
// that created it as well as the hashCode that is generated by the
// system for the path. From the user level (i.e., CLI, REST API and so on),
// the deploy transaction is expected to provide the path and other requests
// are expected to provide the hashCode. The other value will be ignored.
// Internally, the structure could contain both values. For instance, the
// hashCode will be set when first generated using the path.
|
||||
type ChaincodeID struct {
|
||||
//deploy transaction will use the path
|
||||
Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
|
||||
//all other requests will use the name (really a hashcode) generated by
|
||||
//the deploy transaction
|
||||
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
|
||||
//user friendly version name for the chaincode
|
||||
Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ChaincodeID) Reset() { *m = ChaincodeID{} }
|
||||
func (m *ChaincodeID) String() string { return proto.CompactTextString(m) }
|
||||
func (*ChaincodeID) ProtoMessage() {}
|
||||
func (*ChaincodeID) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_202814c635ff5fee, []int{0}
|
||||
}
|
||||
|
||||
func (m *ChaincodeID) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ChaincodeID.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ChaincodeID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ChaincodeID.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ChaincodeID) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ChaincodeID.Merge(m, src)
|
||||
}
|
||||
func (m *ChaincodeID) XXX_Size() int {
|
||||
return xxx_messageInfo_ChaincodeID.Size(m)
|
||||
}
|
||||
func (m *ChaincodeID) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ChaincodeID.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ChaincodeID proto.InternalMessageInfo
|
||||
|
||||
func (m *ChaincodeID) GetPath() string {
|
||||
if m != nil {
|
||||
return m.Path
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ChaincodeID) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ChaincodeID) GetVersion() string {
|
||||
if m != nil {
|
||||
return m.Version
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Carries the chaincode function and its arguments.
|
||||
// UnmarshalJSON in transaction.go converts the string-based REST/JSON input to
|
||||
// the []byte-based current ChaincodeInput structure.
|
||||
type ChaincodeInput struct {
|
||||
Args [][]byte `protobuf:"bytes,1,rep,name=args,proto3" json:"args,omitempty"`
|
||||
Decorations map[string][]byte `protobuf:"bytes,2,rep,name=decorations,proto3" json:"decorations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
// is_init is used for the application to signal that an invocation is to be routed
|
||||
// to the legacy 'Init' function for compatibility with chaincodes which handled
|
||||
// Init in the old way. New applications should manage their initialized state
|
||||
// themselves.
|
||||
IsInit bool `protobuf:"varint,3,opt,name=is_init,json=isInit,proto3" json:"is_init,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ChaincodeInput) Reset() { *m = ChaincodeInput{} }
|
||||
func (m *ChaincodeInput) String() string { return proto.CompactTextString(m) }
|
||||
func (*ChaincodeInput) ProtoMessage() {}
|
||||
func (*ChaincodeInput) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_202814c635ff5fee, []int{1}
|
||||
}
|
||||
|
||||
func (m *ChaincodeInput) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ChaincodeInput.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ChaincodeInput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ChaincodeInput.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ChaincodeInput) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ChaincodeInput.Merge(m, src)
|
||||
}
|
||||
func (m *ChaincodeInput) XXX_Size() int {
|
||||
return xxx_messageInfo_ChaincodeInput.Size(m)
|
||||
}
|
||||
func (m *ChaincodeInput) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ChaincodeInput.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ChaincodeInput proto.InternalMessageInfo
|
||||
|
||||
func (m *ChaincodeInput) GetArgs() [][]byte {
|
||||
if m != nil {
|
||||
return m.Args
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ChaincodeInput) GetDecorations() map[string][]byte {
|
||||
if m != nil {
|
||||
return m.Decorations
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ChaincodeInput) GetIsInit() bool {
|
||||
if m != nil {
|
||||
return m.IsInit
|
||||
}
|
||||
return false
|
||||
}
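A minimal, hedged sketch (not part of the generated file) of the common convention in which the first Args element names the chaincode function and the remaining entries are its arguments; the helper name is hypothetical and the convention comes from typical chaincode usage rather than this file:

package example

import "github.com/hyperledger/fabric-protos-go/peer"

// newInput is a hypothetical helper building a ChaincodeInput where Args[0]
// conventionally carries the function name and the rest its string arguments.
func newInput(fn string, args ...string) *peer.ChaincodeInput {
	byteArgs := [][]byte{[]byte(fn)}
	for _, a := range args {
		byteArgs = append(byteArgs, []byte(a))
	}
	return &peer.ChaincodeInput{Args: byteArgs}
}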
|
||||
|
||||
// Carries the chaincode specification. This is the actual metadata required for
|
||||
// defining a chaincode.
|
||||
type ChaincodeSpec struct {
|
||||
Type ChaincodeSpec_Type `protobuf:"varint,1,opt,name=type,proto3,enum=protos.ChaincodeSpec_Type" json:"type,omitempty"`
|
||||
ChaincodeId *ChaincodeID `protobuf:"bytes,2,opt,name=chaincode_id,json=chaincodeId,proto3" json:"chaincode_id,omitempty"`
|
||||
Input *ChaincodeInput `protobuf:"bytes,3,opt,name=input,proto3" json:"input,omitempty"`
|
||||
Timeout int32 `protobuf:"varint,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ChaincodeSpec) Reset() { *m = ChaincodeSpec{} }
|
||||
func (m *ChaincodeSpec) String() string { return proto.CompactTextString(m) }
|
||||
func (*ChaincodeSpec) ProtoMessage() {}
|
||||
func (*ChaincodeSpec) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_202814c635ff5fee, []int{2}
|
||||
}
|
||||
|
||||
func (m *ChaincodeSpec) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ChaincodeSpec.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ChaincodeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ChaincodeSpec.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ChaincodeSpec) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ChaincodeSpec.Merge(m, src)
|
||||
}
|
||||
func (m *ChaincodeSpec) XXX_Size() int {
|
||||
return xxx_messageInfo_ChaincodeSpec.Size(m)
|
||||
}
|
||||
func (m *ChaincodeSpec) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ChaincodeSpec.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ChaincodeSpec proto.InternalMessageInfo
|
||||
|
||||
func (m *ChaincodeSpec) GetType() ChaincodeSpec_Type {
|
||||
if m != nil {
|
||||
return m.Type
|
||||
}
|
||||
return ChaincodeSpec_UNDEFINED
|
||||
}
|
||||
|
||||
func (m *ChaincodeSpec) GetChaincodeId() *ChaincodeID {
|
||||
if m != nil {
|
||||
return m.ChaincodeId
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ChaincodeSpec) GetInput() *ChaincodeInput {
|
||||
if m != nil {
|
||||
return m.Input
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ChaincodeSpec) GetTimeout() int32 {
|
||||
if m != nil {
|
||||
return m.Timeout
|
||||
}
|
||||
return 0
|
||||
}
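As a hedged sketch tying the pieces above together (not part of the generated code), a ChaincodeSpec combines the chaincode's language type, its ChaincodeID, and an input; the helper name is hypothetical and the name, version, and arguments are caller-supplied placeholders:

package example

import "github.com/hyperledger/fabric-protos-go/peer"

// newGoChaincodeSpec is a hypothetical helper assembling a spec for a Go chaincode.
func newGoChaincodeSpec(name, version string, args [][]byte) *peer.ChaincodeSpec {
	return &peer.ChaincodeSpec{
		Type:        peer.ChaincodeSpec_GOLANG,
		ChaincodeId: &peer.ChaincodeID{Name: name, Version: version},
		Input:       &peer.ChaincodeInput{Args: args},
	}
}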
|
||||
|
||||
// Specify the deployment of a chaincode.
|
||||
// TODO: Define `codePackage`.
|
||||
type ChaincodeDeploymentSpec struct {
|
||||
ChaincodeSpec *ChaincodeSpec `protobuf:"bytes,1,opt,name=chaincode_spec,json=chaincodeSpec,proto3" json:"chaincode_spec,omitempty"`
|
||||
CodePackage []byte `protobuf:"bytes,3,opt,name=code_package,json=codePackage,proto3" json:"code_package,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ChaincodeDeploymentSpec) Reset() { *m = ChaincodeDeploymentSpec{} }
|
||||
func (m *ChaincodeDeploymentSpec) String() string { return proto.CompactTextString(m) }
|
||||
func (*ChaincodeDeploymentSpec) ProtoMessage() {}
|
||||
func (*ChaincodeDeploymentSpec) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_202814c635ff5fee, []int{3}
|
||||
}
|
||||
|
||||
func (m *ChaincodeDeploymentSpec) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ChaincodeDeploymentSpec.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ChaincodeDeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ChaincodeDeploymentSpec.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ChaincodeDeploymentSpec) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ChaincodeDeploymentSpec.Merge(m, src)
|
||||
}
|
||||
func (m *ChaincodeDeploymentSpec) XXX_Size() int {
|
||||
return xxx_messageInfo_ChaincodeDeploymentSpec.Size(m)
|
||||
}
|
||||
func (m *ChaincodeDeploymentSpec) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ChaincodeDeploymentSpec.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ChaincodeDeploymentSpec proto.InternalMessageInfo
|
||||
|
||||
func (m *ChaincodeDeploymentSpec) GetChaincodeSpec() *ChaincodeSpec {
|
||||
if m != nil {
|
||||
return m.ChaincodeSpec
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ChaincodeDeploymentSpec) GetCodePackage() []byte {
|
||||
if m != nil {
|
||||
return m.CodePackage
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Carries the chaincode function and its arguments.
|
||||
type ChaincodeInvocationSpec struct {
|
||||
ChaincodeSpec *ChaincodeSpec `protobuf:"bytes,1,opt,name=chaincode_spec,json=chaincodeSpec,proto3" json:"chaincode_spec,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ChaincodeInvocationSpec) Reset() { *m = ChaincodeInvocationSpec{} }
|
||||
func (m *ChaincodeInvocationSpec) String() string { return proto.CompactTextString(m) }
|
||||
func (*ChaincodeInvocationSpec) ProtoMessage() {}
|
||||
func (*ChaincodeInvocationSpec) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_202814c635ff5fee, []int{4}
|
||||
}
|
||||
|
||||
func (m *ChaincodeInvocationSpec) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ChaincodeInvocationSpec.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ChaincodeInvocationSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ChaincodeInvocationSpec.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ChaincodeInvocationSpec) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ChaincodeInvocationSpec.Merge(m, src)
|
||||
}
|
||||
func (m *ChaincodeInvocationSpec) XXX_Size() int {
|
||||
return xxx_messageInfo_ChaincodeInvocationSpec.Size(m)
|
||||
}
|
||||
func (m *ChaincodeInvocationSpec) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ChaincodeInvocationSpec.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ChaincodeInvocationSpec proto.InternalMessageInfo
|
||||
|
||||
func (m *ChaincodeInvocationSpec) GetChaincodeSpec() *ChaincodeSpec {
|
||||
if m != nil {
|
||||
return m.ChaincodeSpec
|
||||
}
|
||||
return nil
|
||||
}
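A short, hedged sketch (not part of the generated file) showing a spec wrapped into a ChaincodeInvocationSpec and serialized with protobuf for transport; the helper name is hypothetical:

package example

import (
	"github.com/golang/protobuf/proto"
	"github.com/hyperledger/fabric-protos-go/peer"
)

// marshalInvocation is a hypothetical helper: it wraps a ChaincodeSpec into an
// invocation spec and marshals it to bytes.
func marshalInvocation(spec *peer.ChaincodeSpec) ([]byte, error) {
	return proto.Marshal(&peer.ChaincodeInvocationSpec{ChaincodeSpec: spec})
}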
|
||||
|
||||
// LifecycleEvent is used as the payload of the chaincode event emitted by LSCC
|
||||
type LifecycleEvent struct {
|
||||
ChaincodeName string `protobuf:"bytes,1,opt,name=chaincode_name,json=chaincodeName,proto3" json:"chaincode_name,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *LifecycleEvent) Reset() { *m = LifecycleEvent{} }
|
||||
func (m *LifecycleEvent) String() string { return proto.CompactTextString(m) }
|
||||
func (*LifecycleEvent) ProtoMessage() {}
|
||||
func (*LifecycleEvent) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_202814c635ff5fee, []int{5}
|
||||
}
|
||||
|
||||
func (m *LifecycleEvent) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_LifecycleEvent.Unmarshal(m, b)
|
||||
}
|
||||
func (m *LifecycleEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_LifecycleEvent.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *LifecycleEvent) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_LifecycleEvent.Merge(m, src)
|
||||
}
|
||||
func (m *LifecycleEvent) XXX_Size() int {
|
||||
return xxx_messageInfo_LifecycleEvent.Size(m)
|
||||
}
|
||||
func (m *LifecycleEvent) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_LifecycleEvent.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_LifecycleEvent proto.InternalMessageInfo
|
||||
|
||||
func (m *LifecycleEvent) GetChaincodeName() string {
|
||||
if m != nil {
|
||||
return m.ChaincodeName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// CDSData is data stored in the LSCC on instantiation of a CC
// for CDSPackage. This needs to be serialized for ChaincodeData,
// hence the protobuf format
|
||||
type CDSData struct {
|
||||
Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"`
|
||||
Metadatahash []byte `protobuf:"bytes,2,opt,name=metadatahash,proto3" json:"metadatahash,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *CDSData) Reset() { *m = CDSData{} }
|
||||
func (m *CDSData) String() string { return proto.CompactTextString(m) }
|
||||
func (*CDSData) ProtoMessage() {}
|
||||
func (*CDSData) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_202814c635ff5fee, []int{6}
|
||||
}
|
||||
|
||||
func (m *CDSData) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_CDSData.Unmarshal(m, b)
|
||||
}
|
||||
func (m *CDSData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_CDSData.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *CDSData) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_CDSData.Merge(m, src)
|
||||
}
|
||||
func (m *CDSData) XXX_Size() int {
|
||||
return xxx_messageInfo_CDSData.Size(m)
|
||||
}
|
||||
func (m *CDSData) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_CDSData.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_CDSData proto.InternalMessageInfo
|
||||
|
||||
func (m *CDSData) GetHash() []byte {
|
||||
if m != nil {
|
||||
return m.Hash
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *CDSData) GetMetadatahash() []byte {
|
||||
if m != nil {
|
||||
return m.Metadatahash
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ChaincodeData defines the data structure for chaincodes to be serialized by proto
// Type provides an additional check by directing to use a specific package after instantiation
// Data is Type specific (see CDSPackage and SignedCDSPackage)
|
||||
type ChaincodeData struct {
|
||||
// Name of the chaincode
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// Version of the chaincode
|
||||
Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
|
||||
// Escc for the chaincode instance
|
||||
Escc string `protobuf:"bytes,3,opt,name=escc,proto3" json:"escc,omitempty"`
|
||||
// Vscc for the chaincode instance
|
||||
Vscc string `protobuf:"bytes,4,opt,name=vscc,proto3" json:"vscc,omitempty"`
|
||||
// Policy endorsement policy for the chaincode instance
|
||||
Policy *common.SignaturePolicyEnvelope `protobuf:"bytes,5,opt,name=policy,proto3" json:"policy,omitempty"`
|
||||
// Data data specific to the package
|
||||
Data []byte `protobuf:"bytes,6,opt,name=data,proto3" json:"data,omitempty"`
|
||||
// Id of the chaincode that is the unique fingerprint for the CC. This is not
// currently used anywhere, but serves as a good eyecatcher
|
||||
Id []byte `protobuf:"bytes,7,opt,name=id,proto3" json:"id,omitempty"`
|
||||
// InstantiationPolicy for the chaincode
|
||||
InstantiationPolicy *common.SignaturePolicyEnvelope `protobuf:"bytes,8,opt,name=instantiation_policy,json=instantiationPolicy,proto3" json:"instantiation_policy,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ChaincodeData) Reset() { *m = ChaincodeData{} }
|
||||
func (m *ChaincodeData) String() string { return proto.CompactTextString(m) }
|
||||
func (*ChaincodeData) ProtoMessage() {}
|
||||
func (*ChaincodeData) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_202814c635ff5fee, []int{7}
|
||||
}
|
||||
|
||||
func (m *ChaincodeData) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ChaincodeData.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ChaincodeData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ChaincodeData.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ChaincodeData) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ChaincodeData.Merge(m, src)
|
||||
}
|
||||
func (m *ChaincodeData) XXX_Size() int {
|
||||
return xxx_messageInfo_ChaincodeData.Size(m)
|
||||
}
|
||||
func (m *ChaincodeData) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ChaincodeData.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ChaincodeData proto.InternalMessageInfo
|
||||
|
||||
func (m *ChaincodeData) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ChaincodeData) GetVersion() string {
|
||||
if m != nil {
|
||||
return m.Version
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ChaincodeData) GetEscc() string {
|
||||
if m != nil {
|
||||
return m.Escc
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ChaincodeData) GetVscc() string {
|
||||
if m != nil {
|
||||
return m.Vscc
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ChaincodeData) GetPolicy() *common.SignaturePolicyEnvelope {
|
||||
if m != nil {
|
||||
return m.Policy
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ChaincodeData) GetData() []byte {
|
||||
if m != nil {
|
||||
return m.Data
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ChaincodeData) GetId() []byte {
|
||||
if m != nil {
|
||||
return m.Id
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ChaincodeData) GetInstantiationPolicy() *common.SignaturePolicyEnvelope {
|
||||
if m != nil {
|
||||
return m.InstantiationPolicy
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("protos.ChaincodeSpec_Type", ChaincodeSpec_Type_name, ChaincodeSpec_Type_value)
|
||||
proto.RegisterType((*ChaincodeID)(nil), "protos.ChaincodeID")
|
||||
proto.RegisterType((*ChaincodeInput)(nil), "protos.ChaincodeInput")
|
||||
proto.RegisterMapType((map[string][]byte)(nil), "protos.ChaincodeInput.DecorationsEntry")
|
||||
proto.RegisterType((*ChaincodeSpec)(nil), "protos.ChaincodeSpec")
|
||||
proto.RegisterType((*ChaincodeDeploymentSpec)(nil), "protos.ChaincodeDeploymentSpec")
|
||||
proto.RegisterType((*ChaincodeInvocationSpec)(nil), "protos.ChaincodeInvocationSpec")
|
||||
proto.RegisterType((*LifecycleEvent)(nil), "protos.LifecycleEvent")
|
||||
proto.RegisterType((*CDSData)(nil), "protos.CDSData")
|
||||
proto.RegisterType((*ChaincodeData)(nil), "protos.ChaincodeData")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("peer/chaincode.proto", fileDescriptor_202814c635ff5fee) }
|
||||
|
||||
var fileDescriptor_202814c635ff5fee = []byte{
|
||||
// 712 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcb, 0x6e, 0xeb, 0x36,
|
||||
0x10, 0xad, 0x64, 0xf9, 0x11, 0xca, 0x31, 0x54, 0x26, 0x69, 0x84, 0x6c, 0xea, 0x0a, 0x28, 0xea,
|
||||
0x45, 0x22, 0x03, 0x2e, 0xd0, 0x14, 0x45, 0x11, 0xc0, 0x8d, 0xdc, 0xc0, 0x41, 0xe0, 0x04, 0x4c,
|
||||
0xdb, 0x45, 0x37, 0x06, 0x43, 0x8d, 0x65, 0x22, 0x36, 0x25, 0x48, 0xb4, 0x50, 0xfd, 0x48, 0xd7,
|
||||
0xfd, 0x9c, 0xfe, 0xd5, 0xbd, 0x20, 0xe9, 0xe7, 0x4d, 0x16, 0x17, 0xb8, 0x2b, 0x8d, 0x0e, 0xcf,
|
||||
0x3c, 0xce, 0x90, 0x33, 0xe8, 0x34, 0x03, 0xc8, 0xfb, 0x6c, 0x4e, 0xb9, 0x60, 0x69, 0x0c, 0x61,
|
||||
0x96, 0xa7, 0x32, 0xc5, 0x0d, 0xfd, 0x29, 0x2e, 0xce, 0x58, 0xba, 0x5c, 0xa6, 0xa2, 0x9f, 0xa5,
|
||||
0x0b, 0xce, 0x38, 0x14, 0xe6, 0x38, 0x78, 0x44, 0xee, 0xed, 0xc6, 0x63, 0x1c, 0x61, 0x8c, 0x9c,
|
||||
0x8c, 0xca, 0xb9, 0x6f, 0x75, 0xad, 0xde, 0x11, 0xd1, 0xb6, 0xc2, 0x04, 0x5d, 0x82, 0x6f, 0x1b,
|
||||
0x4c, 0xd9, 0xd8, 0x47, 0xcd, 0x12, 0xf2, 0x82, 0xa7, 0xc2, 0xaf, 0x69, 0x78, 0xf3, 0x1b, 0xfc,
|
||||
0x6f, 0xa1, 0xce, 0x2e, 0xa2, 0xc8, 0x56, 0x52, 0x05, 0xa0, 0x79, 0x52, 0xf8, 0x56, 0xb7, 0xd6,
|
||||
0x6b, 0x13, 0x6d, 0xe3, 0x31, 0x72, 0x63, 0x60, 0x69, 0x4e, 0x25, 0x4f, 0x45, 0xe1, 0xdb, 0xdd,
|
||||
0x5a, 0xcf, 0x1d, 0xfc, 0x60, 0x8a, 0x2a, 0xc2, 0xc3, 0x00, 0x61, 0xb4, 0x63, 0x8e, 0x84, 0xcc,
|
||||
0x2b, 0xb2, 0xef, 0x8b, 0xcf, 0x51, 0x93, 0x17, 0x53, 0x2e, 0xb8, 0xd4, 0xb5, 0xb4, 0x48, 0x83,
|
||||
0x17, 0x63, 0xc1, 0xe5, 0xc5, 0x0d, 0xf2, 0x3e, 0xf5, 0xc4, 0x1e, 0xaa, 0xbd, 0x42, 0xb5, 0xd6,
|
||||
0xa7, 0x4c, 0x7c, 0x8a, 0xea, 0x25, 0x5d, 0xac, 0x8c, 0xbe, 0x36, 0x31, 0x3f, 0xbf, 0xd8, 0x3f,
|
||||
0x5b, 0xc1, 0x07, 0x0b, 0x1d, 0x6f, 0x2b, 0x79, 0xce, 0x80, 0xe1, 0x10, 0x39, 0xb2, 0xca, 0x40,
|
||||
0xbb, 0x77, 0x06, 0x17, 0x6f, 0xca, 0x55, 0xa4, 0xf0, 0x8f, 0x2a, 0x03, 0xa2, 0x79, 0xf8, 0x27,
|
||||
0xd4, 0xde, 0xde, 0xc7, 0x94, 0xc7, 0x3a, 0x85, 0x3b, 0x38, 0x79, 0x2b, 0x33, 0x22, 0xee, 0x96,
|
||||
0x38, 0x8e, 0xf1, 0x25, 0xaa, 0x73, 0xa5, 0x5c, 0x0b, 0x72, 0x07, 0xdf, 0xbc, 0xdf, 0x17, 0x62,
|
||||
0x48, 0xea, 0x32, 0x24, 0x5f, 0x42, 0xba, 0x92, 0xbe, 0xd3, 0xb5, 0x7a, 0x75, 0xb2, 0xf9, 0x0d,
|
||||
0x6e, 0x90, 0xa3, 0xaa, 0xc1, 0xc7, 0xe8, 0xe8, 0xcf, 0x49, 0x34, 0xfa, 0x7d, 0x3c, 0x19, 0x45,
|
||||
0xde, 0x57, 0x18, 0xa1, 0xc6, 0xdd, 0xe3, 0xc3, 0x70, 0x72, 0xe7, 0x59, 0xb8, 0x85, 0x9c, 0xc9,
|
||||
0x63, 0x34, 0xf2, 0x6c, 0xdc, 0x44, 0xb5, 0xdb, 0x21, 0xf1, 0x6a, 0x0a, 0xba, 0x1f, 0xfe, 0x35,
|
||||
0xf4, 0x9c, 0xe0, 0x3f, 0x0b, 0x9d, 0x6f, 0x73, 0x46, 0x90, 0x2d, 0xd2, 0x6a, 0x09, 0x42, 0xea,
|
||||
0x5e, 0xfc, 0x8a, 0x3a, 0x3b, 0x6d, 0x45, 0x06, 0x4c, 0x77, 0xc5, 0x1d, 0x9c, 0xbd, 0xdb, 0x15,
|
||||
0x72, 0xcc, 0x0e, 0x3a, 0xf9, 0x1d, 0x6a, 0x6b, 0xc7, 0x8c, 0xb2, 0x57, 0x9a, 0x80, 0x16, 0xda,
|
||||
0x26, 0xae, 0xc2, 0x9e, 0x0c, 0x74, 0xef, 0xb4, 0x6c, 0xaf, 0x76, 0xef, 0xb4, 0x1c, 0xaf, 0x4e,
|
||||
0x3a, 0x30, 0x9b, 0x01, 0x93, 0xbc, 0x84, 0x69, 0x4c, 0x25, 0x90, 0x16, 0xfc, 0x03, 0x6c, 0x0a,
|
||||
0xa2, 0x0c, 0xb2, 0xbd, 0x0a, 0xc7, 0xa2, 0x4c, 0x99, 0xbe, 0xed, 0x2f, 0xaf, 0xd0, 0xa4, 0x27,
|
||||
0x5f, 0xf3, 0x78, 0x9a, 0x80, 0x00, 0xf3, 0x88, 0xa6, 0x74, 0x91, 0x04, 0xd7, 0xa8, 0xf3, 0xc0,
|
||||
0x67, 0xc0, 0x2a, 0xb6, 0x80, 0x51, 0x09, 0x42, 0xe2, 0xef, 0xf7, 0x13, 0xe9, 0x59, 0x31, 0xef,
|
||||
0x6b, 0x17, 0x71, 0x42, 0x97, 0x10, 0x0c, 0x51, 0xf3, 0x36, 0x7a, 0x8e, 0xa8, 0xa4, 0x6a, 0x24,
|
||||
0xe6, 0xb4, 0x30, 0x73, 0xd6, 0x26, 0xda, 0xc6, 0x01, 0x6a, 0x2f, 0x41, 0xd2, 0x98, 0x4a, 0xaa,
|
||||
0xcf, 0xcc, 0x7b, 0x3c, 0xc0, 0x82, 0x7f, 0xed, 0xbd, 0x27, 0xb9, 0x89, 0xb4, 0x97, 0xf1, 0xcd,
|
||||
0x74, 0xda, 0x07, 0xd3, 0xa9, 0xd8, 0x50, 0x30, 0xb6, 0x1e, 0x5a, 0x6d, 0x2b, 0xac, 0x54, 0x98,
|
||||
0x63, 0x30, 0x65, 0xe3, 0x6b, 0xd4, 0xd0, 0x8b, 0xa2, 0xf2, 0xeb, 0xba, 0x65, 0xdf, 0x86, 0x66,
|
||||
0x7d, 0x84, 0xcf, 0x3c, 0x11, 0x54, 0xae, 0x72, 0x78, 0xd2, 0xc7, 0x23, 0x51, 0xc2, 0x22, 0xcd,
|
||||
0x80, 0xac, 0xe9, 0x2a, 0x98, 0x2a, 0xd6, 0x6f, 0x18, 0x61, 0xca, 0xc6, 0x1d, 0x64, 0xf3, 0xd8,
|
||||
0x6f, 0x6a, 0xc4, 0xe6, 0x31, 0x26, 0xe8, 0x94, 0x8b, 0x42, 0x52, 0x21, 0xb9, 0xe9, 0xea, 0x3a,
|
||||
0x55, 0xeb, 0xf3, 0x52, 0x9d, 0x1c, 0x38, 0x9b, 0xc3, 0xdf, 0x08, 0x0a, 0xd2, 0x3c, 0x09, 0xe7,
|
||||
0x55, 0x06, 0xf9, 0x02, 0xe2, 0x04, 0xf2, 0x70, 0x46, 0x5f, 0x72, 0xce, 0x36, 0x77, 0xad, 0x96,
|
||||
0xe3, 0xdf, 0x97, 0x09, 0x97, 0xf3, 0xd5, 0x8b, 0xca, 0xd0, 0xdf, 0xa3, 0xf6, 0x0d, 0xf5, 0xca,
|
||||
0x50, 0xaf, 0x92, 0xb4, 0xaf, 0xd8, 0x2f, 0x66, 0x75, 0xfe, 0xf8, 0x31, 0x00, 0x00, 0xff, 0xff,
|
||||
0x4e, 0xcb, 0x76, 0xde, 0x59, 0x05, 0x00, 0x00,
|
||||
}
|
chaincode/vendor/github.com/hyperledger/fabric-protos-go/peer/chaincode_event.pb.go (generated, vendored, 110 lines)
@ -0,0 +1,110 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: peer/chaincode_event.proto
|
||||
|
||||
package peer
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
//ChaincodeEvent is used for events and registrations that are specific to chaincode
|
||||
//string type - "chaincode"
|
||||
type ChaincodeEvent struct {
|
||||
ChaincodeId string `protobuf:"bytes,1,opt,name=chaincode_id,json=chaincodeId,proto3" json:"chaincode_id,omitempty"`
|
||||
TxId string `protobuf:"bytes,2,opt,name=tx_id,json=txId,proto3" json:"tx_id,omitempty"`
|
||||
EventName string `protobuf:"bytes,3,opt,name=event_name,json=eventName,proto3" json:"event_name,omitempty"`
|
||||
Payload []byte `protobuf:"bytes,4,opt,name=payload,proto3" json:"payload,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ChaincodeEvent) Reset() { *m = ChaincodeEvent{} }
|
||||
func (m *ChaincodeEvent) String() string { return proto.CompactTextString(m) }
|
||||
func (*ChaincodeEvent) ProtoMessage() {}
|
||||
func (*ChaincodeEvent) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_e11f3d5e149f14fa, []int{0}
|
||||
}
|
||||
|
||||
func (m *ChaincodeEvent) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ChaincodeEvent.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ChaincodeEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ChaincodeEvent.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ChaincodeEvent) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ChaincodeEvent.Merge(m, src)
|
||||
}
|
||||
func (m *ChaincodeEvent) XXX_Size() int {
|
||||
return xxx_messageInfo_ChaincodeEvent.Size(m)
|
||||
}
|
||||
func (m *ChaincodeEvent) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ChaincodeEvent.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ChaincodeEvent proto.InternalMessageInfo
|
||||
|
||||
func (m *ChaincodeEvent) GetChaincodeId() string {
|
||||
if m != nil {
|
||||
return m.ChaincodeId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ChaincodeEvent) GetTxId() string {
|
||||
if m != nil {
|
||||
return m.TxId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ChaincodeEvent) GetEventName() string {
|
||||
if m != nil {
|
||||
return m.EventName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ChaincodeEvent) GetPayload() []byte {
|
||||
if m != nil {
|
||||
return m.Payload
|
||||
}
|
||||
return nil
|
||||
}
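// The following is an illustrative sketch, not part of the generated file: it shows how a
// ChaincodeEvent might be populated and serialized with the proto package imported above.
// All field values are hypothetical placeholders.
func exampleChaincodeEvent() ([]byte, error) {
    ev := &ChaincodeEvent{
        ChaincodeId: "mycc",                // hypothetical chaincode name
        TxId:        "tx-123",              // hypothetical transaction ID
        EventName:   "asset-created",       // hypothetical event name
        Payload:     []byte(`{"id":"a1"}`), // opaque, application-defined bytes
    }
    // proto.Marshal (github.com/golang/protobuf/proto) serializes the message to wire format.
    return proto.Marshal(ev)
}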
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*ChaincodeEvent)(nil), "protos.ChaincodeEvent")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("peer/chaincode_event.proto", fileDescriptor_e11f3d5e149f14fa) }
|
||||
|
||||
var fileDescriptor_e11f3d5e149f14fa = []byte{
|
||||
// 221 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2a, 0x48, 0x4d, 0x2d,
|
||||
0xd2, 0x4f, 0xce, 0x48, 0xcc, 0xcc, 0x4b, 0xce, 0x4f, 0x49, 0x8d, 0x4f, 0x2d, 0x4b, 0xcd, 0x2b,
|
||||
0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0x53, 0xc5, 0x4a, 0x8d, 0x8c, 0x5c, 0x7c,
|
||||
0xce, 0x30, 0x15, 0xae, 0x20, 0x05, 0x42, 0x8a, 0x5c, 0x3c, 0x08, 0x3d, 0x99, 0x29, 0x12, 0x8c,
|
||||
0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0xdc, 0x70, 0x31, 0xcf, 0x14, 0x21, 0x61, 0x2e, 0xd6, 0x92, 0x0a,
|
||||
0x90, 0x1c, 0x13, 0x58, 0x8e, 0xa5, 0xa4, 0xc2, 0x33, 0x45, 0x48, 0x96, 0x8b, 0x0b, 0x6c, 0x43,
|
||||
0x7c, 0x5e, 0x62, 0x6e, 0xaa, 0x04, 0x33, 0x58, 0x86, 0x13, 0x2c, 0xe2, 0x97, 0x98, 0x9b, 0x2a,
|
||||
0x24, 0xc1, 0xc5, 0x5e, 0x90, 0x58, 0x99, 0x93, 0x9f, 0x98, 0x22, 0xc1, 0xa2, 0xc0, 0xa8, 0xc1,
|
||||
0x13, 0x04, 0xe3, 0x3a, 0x65, 0x72, 0x29, 0xe5, 0x17, 0xa5, 0xeb, 0x65, 0x54, 0x16, 0xa4, 0x16,
|
||||
0xe5, 0xa4, 0xa6, 0xa4, 0xa7, 0x16, 0xe9, 0xa5, 0x25, 0x26, 0x15, 0x65, 0x26, 0x43, 0xdc, 0x5a,
|
||||
0xac, 0x07, 0xf2, 0x87, 0x93, 0x28, 0xaa, 0x33, 0x03, 0x12, 0x93, 0xb3, 0x13, 0xd3, 0x53, 0xa3,
|
||||
0x74, 0xd2, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0x91, 0x4c, 0xd0, 0x87,
|
||||
0x98, 0xa0, 0x0b, 0x31, 0x41, 0x37, 0x3d, 0x5f, 0x1f, 0x64, 0x48, 0x12, 0xc4, 0xdb, 0xc6, 0x80,
|
||||
0x00, 0x00, 0x00, 0xff, 0xff, 0xf1, 0xe0, 0xae, 0xf8, 0x1b, 0x01, 0x00, 0x00,
|
||||
}
|
chaincode/vendor/github.com/hyperledger/fabric-protos-go/peer/chaincode_shim.pb.go (generated, vendored, 1311 lines)
File diff suppressed because it is too large.
chaincode/vendor/github.com/hyperledger/fabric-protos-go/peer/collection.pb.go (generated, vendored, 364 lines)
chaincode/vendor/github.com/hyperledger/fabric-protos-go/peer/configuration.pb.go (generated, vendored, 225 lines)
@ -0,0 +1,225 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: peer/configuration.proto
|
||||
|
||||
package peer
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// AnchorPeers simply represents a list of anchor peers, which is used in ConfigurationItem
|
||||
type AnchorPeers struct {
|
||||
AnchorPeers []*AnchorPeer `protobuf:"bytes,1,rep,name=anchor_peers,json=anchorPeers,proto3" json:"anchor_peers,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *AnchorPeers) Reset() { *m = AnchorPeers{} }
|
||||
func (m *AnchorPeers) String() string { return proto.CompactTextString(m) }
|
||||
func (*AnchorPeers) ProtoMessage() {}
|
||||
func (*AnchorPeers) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_4978ae8738390a60, []int{0}
|
||||
}
|
||||
|
||||
func (m *AnchorPeers) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_AnchorPeers.Unmarshal(m, b)
|
||||
}
|
||||
func (m *AnchorPeers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_AnchorPeers.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *AnchorPeers) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_AnchorPeers.Merge(m, src)
|
||||
}
|
||||
func (m *AnchorPeers) XXX_Size() int {
|
||||
return xxx_messageInfo_AnchorPeers.Size(m)
|
||||
}
|
||||
func (m *AnchorPeers) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_AnchorPeers.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_AnchorPeers proto.InternalMessageInfo
|
||||
|
||||
func (m *AnchorPeers) GetAnchorPeers() []*AnchorPeer {
|
||||
if m != nil {
|
||||
return m.AnchorPeers
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// AnchorPeer is a message structure which provides information about an anchor peer; it includes the host name,
// port number and peer certificate.
|
||||
type AnchorPeer struct {
|
||||
Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
|
||||
Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *AnchorPeer) Reset() { *m = AnchorPeer{} }
|
||||
func (m *AnchorPeer) String() string { return proto.CompactTextString(m) }
|
||||
func (*AnchorPeer) ProtoMessage() {}
|
||||
func (*AnchorPeer) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_4978ae8738390a60, []int{1}
|
||||
}
|
||||
|
||||
func (m *AnchorPeer) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_AnchorPeer.Unmarshal(m, b)
|
||||
}
|
||||
func (m *AnchorPeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_AnchorPeer.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *AnchorPeer) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_AnchorPeer.Merge(m, src)
|
||||
}
|
||||
func (m *AnchorPeer) XXX_Size() int {
|
||||
return xxx_messageInfo_AnchorPeer.Size(m)
|
||||
}
|
||||
func (m *AnchorPeer) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_AnchorPeer.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_AnchorPeer proto.InternalMessageInfo
|
||||
|
||||
func (m *AnchorPeer) GetHost() string {
|
||||
if m != nil {
|
||||
return m.Host
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *AnchorPeer) GetPort() int32 {
|
||||
if m != nil {
|
||||
return m.Port
|
||||
}
|
||||
return 0
|
||||
}
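// The following is an illustrative sketch, not part of the generated file: it shows how an
// AnchorPeers message might be assembled from host/port pairs, for example when preparing
// channel configuration. The endpoints below are hypothetical.
func exampleAnchorPeers() *AnchorPeers {
    return &AnchorPeers{
        AnchorPeers: []*AnchorPeer{
            {Host: "peer0.org1.example.com", Port: 7051}, // hypothetical endpoint
            {Host: "peer0.org2.example.com", Port: 9051}, // hypothetical endpoint
        },
    }
}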
|
||||
|
||||
// APIResource represents an API resource in the peer whose ACL
|
||||
// is determined by the policy_ref field
|
||||
type APIResource struct {
|
||||
PolicyRef string `protobuf:"bytes,1,opt,name=policy_ref,json=policyRef,proto3" json:"policy_ref,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *APIResource) Reset() { *m = APIResource{} }
|
||||
func (m *APIResource) String() string { return proto.CompactTextString(m) }
|
||||
func (*APIResource) ProtoMessage() {}
|
||||
func (*APIResource) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_4978ae8738390a60, []int{2}
|
||||
}
|
||||
|
||||
func (m *APIResource) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_APIResource.Unmarshal(m, b)
|
||||
}
|
||||
func (m *APIResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_APIResource.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *APIResource) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_APIResource.Merge(m, src)
|
||||
}
|
||||
func (m *APIResource) XXX_Size() int {
|
||||
return xxx_messageInfo_APIResource.Size(m)
|
||||
}
|
||||
func (m *APIResource) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_APIResource.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_APIResource proto.InternalMessageInfo
|
||||
|
||||
func (m *APIResource) GetPolicyRef() string {
|
||||
if m != nil {
|
||||
return m.PolicyRef
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// ACLs provides mappings for resources in a channel. APIResource encapsulates
// a reference to a policy used to determine the ACL for the resource.
|
||||
type ACLs struct {
|
||||
Acls map[string]*APIResource `protobuf:"bytes,1,rep,name=acls,proto3" json:"acls,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ACLs) Reset() { *m = ACLs{} }
|
||||
func (m *ACLs) String() string { return proto.CompactTextString(m) }
|
||||
func (*ACLs) ProtoMessage() {}
|
||||
func (*ACLs) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_4978ae8738390a60, []int{3}
|
||||
}
|
||||
|
||||
func (m *ACLs) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ACLs.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ACLs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ACLs.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ACLs) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ACLs.Merge(m, src)
|
||||
}
|
||||
func (m *ACLs) XXX_Size() int {
|
||||
return xxx_messageInfo_ACLs.Size(m)
|
||||
}
|
||||
func (m *ACLs) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ACLs.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ACLs proto.InternalMessageInfo
|
||||
|
||||
func (m *ACLs) GetAcls() map[string]*APIResource {
|
||||
if m != nil {
|
||||
return m.Acls
|
||||
}
|
||||
return nil
|
||||
}
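// The following is an illustrative sketch, not part of the generated file: it shows how the
// ACLs map ties a resource name to an APIResource whose PolicyRef names the channel policy
// governing access. The resource name and policy path below are hypothetical examples.
func exampleACLs() *ACLs {
    return &ACLs{
        Acls: map[string]*APIResource{
            "peer/Propose": {PolicyRef: "/Channel/Application/Writers"}, // hypothetical mapping
        },
    }
}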
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*AnchorPeers)(nil), "protos.AnchorPeers")
|
||||
proto.RegisterType((*AnchorPeer)(nil), "protos.AnchorPeer")
|
||||
proto.RegisterType((*APIResource)(nil), "protos.APIResource")
|
||||
proto.RegisterType((*ACLs)(nil), "protos.ACLs")
|
||||
proto.RegisterMapType((map[string]*APIResource)(nil), "protos.ACLs.AclsEntry")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("peer/configuration.proto", fileDescriptor_4978ae8738390a60) }
|
||||
|
||||
var fileDescriptor_4978ae8738390a60 = []byte{
|
||||
// 296 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x91, 0xcd, 0x4b, 0xc3, 0x40,
|
||||
0x10, 0xc5, 0x49, 0x3f, 0x84, 0x4e, 0x3c, 0xc8, 0x0a, 0x12, 0x04, 0xa1, 0xe4, 0x54, 0xa5, 0x4d,
|
||||
0xa0, 0x2a, 0x88, 0xb7, 0x58, 0x3d, 0x08, 0x3d, 0x94, 0x3d, 0x7a, 0x29, 0xe9, 0x3a, 0xf9, 0xc0,
|
||||
0x98, 0x09, 0xb3, 0x89, 0x90, 0x9b, 0x7f, 0xba, 0x64, 0xb7, 0x4d, 0x3c, 0xed, 0xdb, 0xb7, 0xbf,
|
||||
0x37, 0x3c, 0x76, 0xc0, 0xab, 0x10, 0x39, 0x54, 0x54, 0x26, 0x79, 0xda, 0x70, 0x5c, 0xe7, 0x54,
|
||||
0x06, 0x15, 0x53, 0x4d, 0xe2, 0xcc, 0x1c, 0xda, 0x7f, 0x05, 0x37, 0x2a, 0x55, 0x46, 0xbc, 0x43,
|
||||
0x64, 0x2d, 0x1e, 0xe1, 0x3c, 0x36, 0xd7, 0x7d, 0x97, 0xd4, 0x9e, 0x33, 0x1f, 0x2f, 0xdc, 0xb5,
|
||||
0xb0, 0x21, 0x1d, 0x0c, 0xa8, 0x74, 0xe3, 0x21, 0xe6, 0x3f, 0x00, 0x0c, 0x4f, 0x42, 0xc0, 0x24,
|
||||
0x23, 0x5d, 0x7b, 0xce, 0xdc, 0x59, 0xcc, 0xa4, 0xd1, 0x9d, 0x57, 0x11, 0xd7, 0xde, 0x68, 0xee,
|
||||
0x2c, 0xa6, 0xd2, 0x68, 0x7f, 0x09, 0x6e, 0xb4, 0x7b, 0x97, 0xa8, 0xa9, 0x61, 0x85, 0xe2, 0x06,
|
||||
0xa0, 0xa2, 0x22, 0x57, 0xed, 0x9e, 0x31, 0x39, 0x86, 0x67, 0xd6, 0x91, 0x98, 0xf8, 0xbf, 0x0e,
|
||||
0x4c, 0xa2, 0xcd, 0x56, 0x8b, 0x3b, 0x98, 0xc4, 0xaa, 0x38, 0x75, 0xbb, 0xea, 0xbb, 0x6d, 0xb6,
|
||||
0x3a, 0x88, 0x54, 0xa1, 0xdf, 0xca, 0x9a, 0x5b, 0x69, 0x98, 0xeb, 0x2d, 0xcc, 0x7a, 0x4b, 0x5c,
|
||||
0xc0, 0xf8, 0x0b, 0xdb, 0xe3, 0xe4, 0x4e, 0x8a, 0x5b, 0x98, 0xfe, 0xc4, 0x45, 0x83, 0xa6, 0x96,
|
||||
0xbb, 0xbe, 0xec, 0x67, 0x0d, 0xb5, 0xa4, 0x25, 0x9e, 0x47, 0x4f, 0xce, 0x8b, 0x04, 0x9f, 0x38,
|
||||
0x0d, 0xb2, 0xb6, 0x42, 0x2e, 0xf0, 0x33, 0x45, 0x0e, 0x92, 0xf8, 0xc0, 0xb9, 0x3a, 0xe5, 0xba,
|
||||
0x4f, 0xfb, 0x58, 0xa6, 0x79, 0x9d, 0x35, 0x87, 0x40, 0xd1, 0x77, 0xf8, 0x0f, 0x0d, 0x2d, 0xba,
|
||||
0xb2, 0xe8, 0x2a, 0xa5, 0xb0, 0xa3, 0x0f, 0x76, 0x11, 0xf7, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff,
|
||||
0x10, 0x49, 0xdc, 0x44, 0xab, 0x01, 0x00, 0x00,
|
||||
}
|
chaincode/vendor/github.com/hyperledger/fabric-protos-go/peer/events.pb.go (generated, vendored)
@ -0,0 +1,742 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: peer/events.proto
|
||||
|
||||
package peer
|
||||
|
||||
import (
|
||||
context "context"
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
common "github.com/hyperledger/fabric-protos-go/common"
|
||||
rwset "github.com/hyperledger/fabric-protos-go/ledger/rwset"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// FilteredBlock is a minimal set of information about a block
|
||||
type FilteredBlock struct {
|
||||
ChannelId string `protobuf:"bytes,1,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"`
|
||||
Number uint64 `protobuf:"varint,2,opt,name=number,proto3" json:"number,omitempty"`
|
||||
FilteredTransactions []*FilteredTransaction `protobuf:"bytes,4,rep,name=filtered_transactions,json=filteredTransactions,proto3" json:"filtered_transactions,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *FilteredBlock) Reset() { *m = FilteredBlock{} }
|
||||
func (m *FilteredBlock) String() string { return proto.CompactTextString(m) }
|
||||
func (*FilteredBlock) ProtoMessage() {}
|
||||
func (*FilteredBlock) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_5eedcc5fab2714e6, []int{0}
|
||||
}
|
||||
|
||||
func (m *FilteredBlock) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_FilteredBlock.Unmarshal(m, b)
|
||||
}
|
||||
func (m *FilteredBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_FilteredBlock.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *FilteredBlock) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_FilteredBlock.Merge(m, src)
|
||||
}
|
||||
func (m *FilteredBlock) XXX_Size() int {
|
||||
return xxx_messageInfo_FilteredBlock.Size(m)
|
||||
}
|
||||
func (m *FilteredBlock) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_FilteredBlock.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_FilteredBlock proto.InternalMessageInfo
|
||||
|
||||
func (m *FilteredBlock) GetChannelId() string {
|
||||
if m != nil {
|
||||
return m.ChannelId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *FilteredBlock) GetNumber() uint64 {
|
||||
if m != nil {
|
||||
return m.Number
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *FilteredBlock) GetFilteredTransactions() []*FilteredTransaction {
|
||||
if m != nil {
|
||||
return m.FilteredTransactions
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// FilteredTransaction is a minimal set of information about a transaction
|
||||
// within a block
|
||||
type FilteredTransaction struct {
|
||||
Txid string `protobuf:"bytes,1,opt,name=txid,proto3" json:"txid,omitempty"`
|
||||
Type common.HeaderType `protobuf:"varint,2,opt,name=type,proto3,enum=common.HeaderType" json:"type,omitempty"`
|
||||
TxValidationCode TxValidationCode `protobuf:"varint,3,opt,name=tx_validation_code,json=txValidationCode,proto3,enum=protos.TxValidationCode" json:"tx_validation_code,omitempty"`
|
||||
// Types that are valid to be assigned to Data:
|
||||
// *FilteredTransaction_TransactionActions
|
||||
Data isFilteredTransaction_Data `protobuf_oneof:"Data"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *FilteredTransaction) Reset() { *m = FilteredTransaction{} }
|
||||
func (m *FilteredTransaction) String() string { return proto.CompactTextString(m) }
|
||||
func (*FilteredTransaction) ProtoMessage() {}
|
||||
func (*FilteredTransaction) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_5eedcc5fab2714e6, []int{1}
|
||||
}
|
||||
|
||||
func (m *FilteredTransaction) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_FilteredTransaction.Unmarshal(m, b)
|
||||
}
|
||||
func (m *FilteredTransaction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_FilteredTransaction.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *FilteredTransaction) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_FilteredTransaction.Merge(m, src)
|
||||
}
|
||||
func (m *FilteredTransaction) XXX_Size() int {
|
||||
return xxx_messageInfo_FilteredTransaction.Size(m)
|
||||
}
|
||||
func (m *FilteredTransaction) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_FilteredTransaction.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_FilteredTransaction proto.InternalMessageInfo
|
||||
|
||||
func (m *FilteredTransaction) GetTxid() string {
|
||||
if m != nil {
|
||||
return m.Txid
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *FilteredTransaction) GetType() common.HeaderType {
|
||||
if m != nil {
|
||||
return m.Type
|
||||
}
|
||||
return common.HeaderType_MESSAGE
|
||||
}
|
||||
|
||||
func (m *FilteredTransaction) GetTxValidationCode() TxValidationCode {
|
||||
if m != nil {
|
||||
return m.TxValidationCode
|
||||
}
|
||||
return TxValidationCode_VALID
|
||||
}
|
||||
|
||||
type isFilteredTransaction_Data interface {
|
||||
isFilteredTransaction_Data()
|
||||
}
|
||||
|
||||
type FilteredTransaction_TransactionActions struct {
|
||||
TransactionActions *FilteredTransactionActions `protobuf:"bytes,4,opt,name=transaction_actions,json=transactionActions,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*FilteredTransaction_TransactionActions) isFilteredTransaction_Data() {}
|
||||
|
||||
func (m *FilteredTransaction) GetData() isFilteredTransaction_Data {
|
||||
if m != nil {
|
||||
return m.Data
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *FilteredTransaction) GetTransactionActions() *FilteredTransactionActions {
|
||||
if x, ok := m.GetData().(*FilteredTransaction_TransactionActions); ok {
|
||||
return x.TransactionActions
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// XXX_OneofWrappers is for the internal use of the proto package.
|
||||
func (*FilteredTransaction) XXX_OneofWrappers() []interface{} {
|
||||
return []interface{}{
|
||||
(*FilteredTransaction_TransactionActions)(nil),
|
||||
}
|
||||
}
|
||||
|
||||
// FilteredTransactionActions is a wrapper for array of TransactionAction
|
||||
// message from regular block
|
||||
type FilteredTransactionActions struct {
|
||||
ChaincodeActions []*FilteredChaincodeAction `protobuf:"bytes,1,rep,name=chaincode_actions,json=chaincodeActions,proto3" json:"chaincode_actions,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *FilteredTransactionActions) Reset() { *m = FilteredTransactionActions{} }
|
||||
func (m *FilteredTransactionActions) String() string { return proto.CompactTextString(m) }
|
||||
func (*FilteredTransactionActions) ProtoMessage() {}
|
||||
func (*FilteredTransactionActions) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_5eedcc5fab2714e6, []int{2}
|
||||
}
|
||||
|
||||
func (m *FilteredTransactionActions) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_FilteredTransactionActions.Unmarshal(m, b)
|
||||
}
|
||||
func (m *FilteredTransactionActions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_FilteredTransactionActions.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *FilteredTransactionActions) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_FilteredTransactionActions.Merge(m, src)
|
||||
}
|
||||
func (m *FilteredTransactionActions) XXX_Size() int {
|
||||
return xxx_messageInfo_FilteredTransactionActions.Size(m)
|
||||
}
|
||||
func (m *FilteredTransactionActions) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_FilteredTransactionActions.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_FilteredTransactionActions proto.InternalMessageInfo
|
||||
|
||||
func (m *FilteredTransactionActions) GetChaincodeActions() []*FilteredChaincodeAction {
|
||||
if m != nil {
|
||||
return m.ChaincodeActions
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// FilteredChaincodeAction is a minimal set of information about an action
|
||||
// within a transaction
|
||||
type FilteredChaincodeAction struct {
|
||||
ChaincodeEvent *ChaincodeEvent `protobuf:"bytes,1,opt,name=chaincode_event,json=chaincodeEvent,proto3" json:"chaincode_event,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *FilteredChaincodeAction) Reset() { *m = FilteredChaincodeAction{} }
|
||||
func (m *FilteredChaincodeAction) String() string { return proto.CompactTextString(m) }
|
||||
func (*FilteredChaincodeAction) ProtoMessage() {}
|
||||
func (*FilteredChaincodeAction) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_5eedcc5fab2714e6, []int{3}
|
||||
}
|
||||
|
||||
func (m *FilteredChaincodeAction) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_FilteredChaincodeAction.Unmarshal(m, b)
|
||||
}
|
||||
func (m *FilteredChaincodeAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_FilteredChaincodeAction.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *FilteredChaincodeAction) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_FilteredChaincodeAction.Merge(m, src)
|
||||
}
|
||||
func (m *FilteredChaincodeAction) XXX_Size() int {
|
||||
return xxx_messageInfo_FilteredChaincodeAction.Size(m)
|
||||
}
|
||||
func (m *FilteredChaincodeAction) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_FilteredChaincodeAction.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_FilteredChaincodeAction proto.InternalMessageInfo
|
||||
|
||||
func (m *FilteredChaincodeAction) GetChaincodeEvent() *ChaincodeEvent {
|
||||
if m != nil {
|
||||
return m.ChaincodeEvent
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// BlockAndPrivateData contains Block and a map from tx_seq_in_block to rwset.TxPvtReadWriteSet
|
||||
type BlockAndPrivateData struct {
|
||||
Block *common.Block `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"`
|
||||
// map from tx_seq_in_block to rwset.TxPvtReadWriteSet
|
||||
PrivateDataMap map[uint64]*rwset.TxPvtReadWriteSet `protobuf:"bytes,2,rep,name=private_data_map,json=privateDataMap,proto3" json:"private_data_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *BlockAndPrivateData) Reset() { *m = BlockAndPrivateData{} }
|
||||
func (m *BlockAndPrivateData) String() string { return proto.CompactTextString(m) }
|
||||
func (*BlockAndPrivateData) ProtoMessage() {}
|
||||
func (*BlockAndPrivateData) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_5eedcc5fab2714e6, []int{4}
|
||||
}
|
||||
|
||||
func (m *BlockAndPrivateData) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_BlockAndPrivateData.Unmarshal(m, b)
|
||||
}
|
||||
func (m *BlockAndPrivateData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_BlockAndPrivateData.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *BlockAndPrivateData) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_BlockAndPrivateData.Merge(m, src)
|
||||
}
|
||||
func (m *BlockAndPrivateData) XXX_Size() int {
|
||||
return xxx_messageInfo_BlockAndPrivateData.Size(m)
|
||||
}
|
||||
func (m *BlockAndPrivateData) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_BlockAndPrivateData.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_BlockAndPrivateData proto.InternalMessageInfo
|
||||
|
||||
func (m *BlockAndPrivateData) GetBlock() *common.Block {
|
||||
if m != nil {
|
||||
return m.Block
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *BlockAndPrivateData) GetPrivateDataMap() map[uint64]*rwset.TxPvtReadWriteSet {
|
||||
if m != nil {
|
||||
return m.PrivateDataMap
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeliverResponse
|
||||
type DeliverResponse struct {
|
||||
// Types that are valid to be assigned to Type:
|
||||
// *DeliverResponse_Status
|
||||
// *DeliverResponse_Block
|
||||
// *DeliverResponse_FilteredBlock
|
||||
// *DeliverResponse_BlockAndPrivateData
|
||||
Type isDeliverResponse_Type `protobuf_oneof:"Type"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *DeliverResponse) Reset() { *m = DeliverResponse{} }
|
||||
func (m *DeliverResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*DeliverResponse) ProtoMessage() {}
|
||||
func (*DeliverResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_5eedcc5fab2714e6, []int{5}
|
||||
}
|
||||
|
||||
func (m *DeliverResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_DeliverResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *DeliverResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_DeliverResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *DeliverResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_DeliverResponse.Merge(m, src)
|
||||
}
|
||||
func (m *DeliverResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_DeliverResponse.Size(m)
|
||||
}
|
||||
func (m *DeliverResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_DeliverResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_DeliverResponse proto.InternalMessageInfo
|
||||
|
||||
type isDeliverResponse_Type interface {
|
||||
isDeliverResponse_Type()
|
||||
}
|
||||
|
||||
type DeliverResponse_Status struct {
|
||||
Status common.Status `protobuf:"varint,1,opt,name=status,proto3,enum=common.Status,oneof"`
|
||||
}
|
||||
|
||||
type DeliverResponse_Block struct {
|
||||
Block *common.Block `protobuf:"bytes,2,opt,name=block,proto3,oneof"`
|
||||
}
|
||||
|
||||
type DeliverResponse_FilteredBlock struct {
|
||||
FilteredBlock *FilteredBlock `protobuf:"bytes,3,opt,name=filtered_block,json=filteredBlock,proto3,oneof"`
|
||||
}
|
||||
|
||||
type DeliverResponse_BlockAndPrivateData struct {
|
||||
BlockAndPrivateData *BlockAndPrivateData `protobuf:"bytes,4,opt,name=block_and_private_data,json=blockAndPrivateData,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*DeliverResponse_Status) isDeliverResponse_Type() {}
|
||||
|
||||
func (*DeliverResponse_Block) isDeliverResponse_Type() {}
|
||||
|
||||
func (*DeliverResponse_FilteredBlock) isDeliverResponse_Type() {}
|
||||
|
||||
func (*DeliverResponse_BlockAndPrivateData) isDeliverResponse_Type() {}
|
||||
|
||||
func (m *DeliverResponse) GetType() isDeliverResponse_Type {
|
||||
if m != nil {
|
||||
return m.Type
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *DeliverResponse) GetStatus() common.Status {
|
||||
if x, ok := m.GetType().(*DeliverResponse_Status); ok {
|
||||
return x.Status
|
||||
}
|
||||
return common.Status_UNKNOWN
|
||||
}
|
||||
|
||||
func (m *DeliverResponse) GetBlock() *common.Block {
|
||||
if x, ok := m.GetType().(*DeliverResponse_Block); ok {
|
||||
return x.Block
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *DeliverResponse) GetFilteredBlock() *FilteredBlock {
|
||||
if x, ok := m.GetType().(*DeliverResponse_FilteredBlock); ok {
|
||||
return x.FilteredBlock
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *DeliverResponse) GetBlockAndPrivateData() *BlockAndPrivateData {
|
||||
if x, ok := m.GetType().(*DeliverResponse_BlockAndPrivateData); ok {
|
||||
return x.BlockAndPrivateData
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// XXX_OneofWrappers is for the internal use of the proto package.
|
||||
func (*DeliverResponse) XXX_OneofWrappers() []interface{} {
|
||||
return []interface{}{
|
||||
(*DeliverResponse_Status)(nil),
|
||||
(*DeliverResponse_Block)(nil),
|
||||
(*DeliverResponse_FilteredBlock)(nil),
|
||||
(*DeliverResponse_BlockAndPrivateData)(nil),
|
||||
}
|
||||
}
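// The following is an illustrative sketch, not part of the generated file: it shows how a
// consumer might dispatch on the DeliverResponse oneof using the wrapper types declared
// above. The nil-safe Get* accessors keep the sketch tolerant of missing fields.
func handleDeliverResponse(resp *DeliverResponse) {
    switch t := resp.GetType().(type) {
    case *DeliverResponse_Status:
        fmt.Println("stream status:", t.Status) // terminal status for the request
    case *DeliverResponse_Block:
        fmt.Println("block number:", t.Block.GetHeader().GetNumber())
    case *DeliverResponse_FilteredBlock:
        fmt.Println("filtered block number:", t.FilteredBlock.GetNumber())
    case *DeliverResponse_BlockAndPrivateData:
        fmt.Println("private data entries:", len(t.BlockAndPrivateData.GetPrivateDataMap()))
    }
}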
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*FilteredBlock)(nil), "protos.FilteredBlock")
|
||||
proto.RegisterType((*FilteredTransaction)(nil), "protos.FilteredTransaction")
|
||||
proto.RegisterType((*FilteredTransactionActions)(nil), "protos.FilteredTransactionActions")
|
||||
proto.RegisterType((*FilteredChaincodeAction)(nil), "protos.FilteredChaincodeAction")
|
||||
proto.RegisterType((*BlockAndPrivateData)(nil), "protos.BlockAndPrivateData")
|
||||
proto.RegisterMapType((map[uint64]*rwset.TxPvtReadWriteSet)(nil), "protos.BlockAndPrivateData.PrivateDataMapEntry")
|
||||
proto.RegisterType((*DeliverResponse)(nil), "protos.DeliverResponse")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("peer/events.proto", fileDescriptor_5eedcc5fab2714e6) }
|
||||
|
||||
var fileDescriptor_5eedcc5fab2714e6 = []byte{
|
||||
// 700 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x4d, 0x6f, 0xda, 0x4a,
|
||||
0x14, 0xc5, 0x40, 0x78, 0xca, 0x45, 0x10, 0x32, 0xbc, 0x10, 0x8b, 0xe8, 0xe9, 0x45, 0x7e, 0x7a,
|
||||
0x15, 0x8b, 0xc6, 0x54, 0x74, 0x53, 0x65, 0xd1, 0x2a, 0xe4, 0x43, 0x44, 0x6a, 0x25, 0x34, 0xa1,
|
||||
0x8d, 0x9a, 0x2e, 0xac, 0xc1, 0xbe, 0x80, 0x1b, 0x63, 0x5b, 0xf6, 0x40, 0xe1, 0x9f, 0xf4, 0x87,
|
||||
0xf5, 0x97, 0x74, 0xd5, 0x55, 0x55, 0x79, 0xc6, 0xc3, 0x57, 0x48, 0xa4, 0x6c, 0xec, 0xf1, 0xbd,
|
||||
0xe7, 0x9c, 0x3b, 0xf7, 0xf8, 0xce, 0xc0, 0x7e, 0x88, 0x18, 0x35, 0x71, 0x8a, 0x3e, 0x8f, 0xcd,
|
||||
0x30, 0x0a, 0x78, 0x40, 0x0a, 0xe2, 0x15, 0xd7, 0xab, 0x76, 0x30, 0x1e, 0x07, 0x7e, 0x53, 0xbe,
|
||||
0x64, 0xb2, 0xae, 0x7b, 0xe8, 0x0c, 0x31, 0x6a, 0x46, 0xdf, 0x62, 0xe4, 0xf2, 0x99, 0x66, 0xea,
|
||||
0x42, 0xc9, 0x1e, 0x31, 0xd7, 0xb7, 0x03, 0x07, 0x2d, 0xa1, 0x99, 0xe6, 0x6a, 0x22, 0xc7, 0x23,
|
||||
0xe6, 0xc7, 0xcc, 0xe6, 0xae, 0x52, 0x33, 0xbe, 0x6b, 0x50, 0xba, 0x72, 0x3d, 0x8e, 0x11, 0x3a,
|
||||
0x6d, 0x2f, 0xb0, 0xef, 0xc9, 0x3f, 0x00, 0xf6, 0x88, 0xf9, 0x3e, 0x7a, 0x96, 0xeb, 0xe8, 0xda,
|
||||
0xb1, 0xd6, 0xd8, 0xa5, 0xbb, 0x69, 0xe4, 0xda, 0x21, 0x35, 0x28, 0xf8, 0x93, 0x71, 0x1f, 0x23,
|
||||
0x3d, 0x7b, 0xac, 0x35, 0xf2, 0x34, 0xfd, 0x22, 0x5d, 0x38, 0x18, 0xa4, 0x3a, 0xd6, 0x4a, 0x99,
|
||||
0x58, 0xcf, 0x1f, 0xe7, 0x1a, 0xc5, 0xd6, 0x91, 0xac, 0x17, 0x9b, 0xaa, 0x58, 0x6f, 0x89, 0xa1,
|
||||
0x7f, 0x0f, 0x1e, 0x06, 0x63, 0xe3, 0x97, 0x06, 0xd5, 0x2d, 0x68, 0x42, 0x20, 0xcf, 0x67, 0x8b,
|
||||
0xad, 0x89, 0x35, 0x79, 0x01, 0x79, 0x3e, 0x0f, 0x51, 0xec, 0xa9, 0xdc, 0x22, 0x66, 0xea, 0x58,
|
||||
0x07, 0x99, 0x83, 0x51, 0x6f, 0x1e, 0x22, 0x15, 0x79, 0x72, 0x05, 0x84, 0xcf, 0xac, 0x29, 0xf3,
|
||||
0x5c, 0x87, 0x25, 0x62, 0x56, 0x62, 0x94, 0x9e, 0x13, 0x2c, 0x5d, 0x6d, 0xb1, 0x37, 0xfb, 0xb4,
|
||||
0x00, 0x9c, 0x07, 0x0e, 0xd2, 0x0a, 0xdf, 0x88, 0x90, 0x8f, 0x50, 0x5d, 0x69, 0xd2, 0x5a, 0xf6,
|
||||
0xaa, 0x35, 0x8a, 0x2d, 0xe3, 0x89, 0x5e, 0xcf, 0x24, 0xb2, 0x93, 0xa1, 0x84, 0x3f, 0x88, 0xb6,
|
||||
0x0b, 0x90, 0xbf, 0x60, 0x9c, 0x19, 0x5f, 0xa1, 0xfe, 0x38, 0x97, 0xbc, 0x87, 0xfd, 0xe5, 0x4f,
|
||||
0x56, 0xa5, 0x35, 0x61, 0xf3, 0xbf, 0x9b, 0xa5, 0xcf, 0x15, 0x50, 0x92, 0x69, 0xc5, 0x5e, 0x0f,
|
||||
0xc4, 0xc6, 0x1d, 0x1c, 0x3e, 0x02, 0x26, 0xef, 0x60, 0x6f, 0x63, 0x9a, 0x84, 0xe9, 0xc5, 0x56,
|
||||
0x4d, 0x95, 0x59, 0x30, 0x2e, 0x93, 0x2c, 0x2d, 0xdb, 0x6b, 0xdf, 0xc6, 0x4f, 0x0d, 0xaa, 0x62,
|
||||
0xaa, 0xce, 0x7c, 0xa7, 0x1b, 0xb9, 0x53, 0xc6, 0x31, 0xe9, 0x8f, 0xfc, 0x07, 0x3b, 0xfd, 0x24,
|
||||
0x9c, 0xca, 0x95, 0xd4, 0xff, 0x12, 0x58, 0x2a, 0x73, 0xe4, 0x33, 0x54, 0x42, 0xc9, 0xb1, 0x1c,
|
||||
0xc6, 0x99, 0x35, 0x66, 0xa1, 0x9e, 0x15, 0x5d, 0x36, 0x55, 0xf9, 0x2d, 0xda, 0xe6, 0xca, 0xfa,
|
||||
0x03, 0x0b, 0x2f, 0x7d, 0x1e, 0xcd, 0x69, 0x39, 0x5c, 0x0b, 0xd6, 0xbf, 0x40, 0x75, 0x0b, 0x8c,
|
||||
0x54, 0x20, 0x77, 0x8f, 0x73, 0xb1, 0xa9, 0x3c, 0x4d, 0x96, 0xc4, 0x84, 0x9d, 0x29, 0xf3, 0x26,
|
||||
0x72, 0xb0, 0x8a, 0x2d, 0xdd, 0x94, 0xe7, 0xad, 0x37, 0xeb, 0x4e, 0x39, 0x45, 0xe6, 0xdc, 0x46,
|
||||
0x2e, 0xc7, 0x1b, 0xe4, 0x54, 0xc2, 0x4e, 0xb3, 0x6f, 0x34, 0xe3, 0xb7, 0x06, 0x7b, 0x17, 0xe8,
|
||||
0xb9, 0x53, 0x8c, 0x28, 0xc6, 0x61, 0xe0, 0xc7, 0x48, 0x1a, 0x50, 0x88, 0x39, 0xe3, 0x93, 0x58,
|
||||
0x88, 0x97, 0x5b, 0x65, 0xd5, 0xf1, 0x8d, 0x88, 0x76, 0x32, 0x34, 0xcd, 0x93, 0xff, 0x95, 0x35,
|
||||
0xd9, 0x2d, 0xd6, 0x74, 0x32, 0xca, 0x9c, 0xb7, 0x50, 0x5e, 0x1c, 0x37, 0x89, 0xcf, 0x09, 0xfc,
|
||||
0xc1, 0xe6, 0x00, 0x28, 0x5e, 0x69, 0xb0, 0x76, 0xca, 0x29, 0xd4, 0x04, 0xcd, 0x62, 0xbe, 0x63,
|
||||
0xad, 0xda, 0x9c, 0xce, 0xf0, 0xd1, 0x13, 0x16, 0x77, 0x32, 0xb4, 0xda, 0x7f, 0x18, 0x4e, 0xa6,
|
||||
0x37, 0x39, 0x6a, 0xad, 0x1f, 0x1a, 0xfc, 0x95, 0x1a, 0x40, 0x4e, 0x97, 0xcb, 0x8a, 0x6a, 0xe5,
|
||||
0xd2, 0x9f, 0xa2, 0x17, 0x84, 0x58, 0x3f, 0x54, 0x45, 0x36, 0xec, 0x32, 0x32, 0x0d, 0xed, 0x95,
|
||||
0x46, 0xda, 0x0b, 0x1f, 0x55, 0x33, 0xcf, 0xd7, 0xb8, 0x86, 0x5a, 0x9a, 0xb8, 0x75, 0xf9, 0x68,
|
||||
0x75, 0x06, 0x9f, 0x2b, 0xd5, 0x66, 0x60, 0x04, 0xd1, 0xd0, 0x1c, 0xcd, 0x43, 0x8c, 0xe4, 0x1d,
|
||||
0x6c, 0x0e, 0x58, 0x3f, 0x72, 0x6d, 0x45, 0x4b, 0xae, 0xd8, 0x76, 0x49, 0x4c, 0x7e, 0xdc, 0x65,
|
||||
0xf6, 0x3d, 0x1b, 0xe2, 0xdd, 0xcb, 0xa1, 0xcb, 0x47, 0x93, 0x7e, 0x52, 0xab, 0xb9, 0xc2, 0x6c,
|
||||
0x4a, 0xe6, 0x89, 0x64, 0x9e, 0x0c, 0x83, 0x66, 0x42, 0xee, 0xcb, 0x8b, 0xff, 0xf5, 0x9f, 0x00,
|
||||
0x00, 0x00, 0xff, 0xff, 0xe9, 0x5f, 0xb5, 0x2a, 0x14, 0x06, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
|
||||
// DeliverClient is the client API for Deliver service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type DeliverClient interface {
|
||||
// Deliver first requires an Envelope of type ab.DELIVER_SEEK_INFO with
|
||||
// Payload data as a marshaled orderer.SeekInfo message,
|
||||
// then a stream of block replies is received
|
||||
Deliver(ctx context.Context, opts ...grpc.CallOption) (Deliver_DeliverClient, error)
|
||||
// DeliverFiltered first requires an Envelope of type ab.DELIVER_SEEK_INFO with
|
||||
// Payload data as a marshaled orderer.SeekInfo message,
|
||||
// then a stream of **filtered** block replies is received
|
||||
DeliverFiltered(ctx context.Context, opts ...grpc.CallOption) (Deliver_DeliverFilteredClient, error)
|
||||
// DeliverWithPrivateData first requires an Envelope of type ab.DELIVER_SEEK_INFO with
|
||||
// Payload data as a marshaled orderer.SeekInfo message,
|
||||
// then a stream of block and private data replies is received
|
||||
DeliverWithPrivateData(ctx context.Context, opts ...grpc.CallOption) (Deliver_DeliverWithPrivateDataClient, error)
|
||||
}
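// The following is an illustrative sketch, not part of the generated file: it shows the
// expected calling pattern for DeliverFiltered — open the stream, send a signed
// common.Envelope carrying a marshaled orderer.SeekInfo (built elsewhere; its construction
// is out of scope here), then read DeliverResponse messages until the stream ends.
func exampleDeliverFiltered(ctx context.Context, cc *grpc.ClientConn, seekEnvelope *common.Envelope) error {
    client := NewDeliverClient(cc)
    stream, err := client.DeliverFiltered(ctx)
    if err != nil {
        return err
    }
    if err := stream.Send(seekEnvelope); err != nil {
        return err
    }
    for {
        resp, err := stream.Recv()
        if err != nil {
            return err // io.EOF indicates the server closed the stream normally
        }
        if fb := resp.GetFilteredBlock(); fb != nil {
            _ = fb.GetNumber() // consume the filtered block as needed
        }
    }
}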
|
||||
|
||||
type deliverClient struct {
|
||||
cc *grpc.ClientConn
|
||||
}
|
||||
|
||||
func NewDeliverClient(cc *grpc.ClientConn) DeliverClient {
|
||||
return &deliverClient{cc}
|
||||
}
|
||||
|
||||
func (c *deliverClient) Deliver(ctx context.Context, opts ...grpc.CallOption) (Deliver_DeliverClient, error) {
|
||||
stream, err := c.cc.NewStream(ctx, &_Deliver_serviceDesc.Streams[0], "/protos.Deliver/Deliver", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &deliverDeliverClient{stream}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type Deliver_DeliverClient interface {
|
||||
Send(*common.Envelope) error
|
||||
Recv() (*DeliverResponse, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type deliverDeliverClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *deliverDeliverClient) Send(m *common.Envelope) error {
|
||||
return x.ClientStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *deliverDeliverClient) Recv() (*DeliverResponse, error) {
|
||||
m := new(DeliverResponse)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (c *deliverClient) DeliverFiltered(ctx context.Context, opts ...grpc.CallOption) (Deliver_DeliverFilteredClient, error) {
|
||||
stream, err := c.cc.NewStream(ctx, &_Deliver_serviceDesc.Streams[1], "/protos.Deliver/DeliverFiltered", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &deliverDeliverFilteredClient{stream}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type Deliver_DeliverFilteredClient interface {
|
||||
Send(*common.Envelope) error
|
||||
Recv() (*DeliverResponse, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type deliverDeliverFilteredClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *deliverDeliverFilteredClient) Send(m *common.Envelope) error {
|
||||
return x.ClientStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *deliverDeliverFilteredClient) Recv() (*DeliverResponse, error) {
|
||||
m := new(DeliverResponse)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (c *deliverClient) DeliverWithPrivateData(ctx context.Context, opts ...grpc.CallOption) (Deliver_DeliverWithPrivateDataClient, error) {
|
||||
stream, err := c.cc.NewStream(ctx, &_Deliver_serviceDesc.Streams[2], "/protos.Deliver/DeliverWithPrivateData", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &deliverDeliverWithPrivateDataClient{stream}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type Deliver_DeliverWithPrivateDataClient interface {
|
||||
Send(*common.Envelope) error
|
||||
Recv() (*DeliverResponse, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type deliverDeliverWithPrivateDataClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *deliverDeliverWithPrivateDataClient) Send(m *common.Envelope) error {
|
||||
return x.ClientStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *deliverDeliverWithPrivateDataClient) Recv() (*DeliverResponse, error) {
|
||||
m := new(DeliverResponse)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// DeliverServer is the server API for Deliver service.
|
||||
type DeliverServer interface {
|
||||
// Deliver first requires an Envelope of type ab.DELIVER_SEEK_INFO with
|
||||
// Payload data as a marshaled orderer.SeekInfo message,
|
||||
// then a stream of block replies is received
|
||||
Deliver(Deliver_DeliverServer) error
|
||||
// DeliverFiltered first requires an Envelope of type ab.DELIVER_SEEK_INFO with
|
||||
// Payload data as a marshaled orderer.SeekInfo message,
|
||||
// then a stream of **filtered** block replies is received
|
||||
DeliverFiltered(Deliver_DeliverFilteredServer) error
|
||||
// DeliverWithPrivateData first requires an Envelope of type ab.DELIVER_SEEK_INFO with
|
||||
// Payload data as a marshaled orderer.SeekInfo message,
|
||||
// then a stream of block and private data replies is received
|
||||
DeliverWithPrivateData(Deliver_DeliverWithPrivateDataServer) error
|
||||
}
|
||||
|
||||
// UnimplementedDeliverServer can be embedded to have forward compatible implementations.
|
||||
type UnimplementedDeliverServer struct {
|
||||
}
|
||||
|
||||
func (*UnimplementedDeliverServer) Deliver(srv Deliver_DeliverServer) error {
|
||||
return status.Errorf(codes.Unimplemented, "method Deliver not implemented")
|
||||
}
|
||||
func (*UnimplementedDeliverServer) DeliverFiltered(srv Deliver_DeliverFilteredServer) error {
|
||||
return status.Errorf(codes.Unimplemented, "method DeliverFiltered not implemented")
|
||||
}
|
||||
func (*UnimplementedDeliverServer) DeliverWithPrivateData(srv Deliver_DeliverWithPrivateDataServer) error {
|
||||
return status.Errorf(codes.Unimplemented, "method DeliverWithPrivateData not implemented")
|
||||
}
|
||||
|
||||
func RegisterDeliverServer(s *grpc.Server, srv DeliverServer) {
|
||||
s.RegisterService(&_Deliver_serviceDesc, srv)
|
||||
}
|
||||
|
||||
func _Deliver_Deliver_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
return srv.(DeliverServer).Deliver(&deliverDeliverServer{stream})
|
||||
}
|
||||
|
||||
type Deliver_DeliverServer interface {
|
||||
Send(*DeliverResponse) error
|
||||
Recv() (*common.Envelope, error)
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type deliverDeliverServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *deliverDeliverServer) Send(m *DeliverResponse) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *deliverDeliverServer) Recv() (*common.Envelope, error) {
|
||||
m := new(common.Envelope)
|
||||
if err := x.ServerStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func _Deliver_DeliverFiltered_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
return srv.(DeliverServer).DeliverFiltered(&deliverDeliverFilteredServer{stream})
|
||||
}
|
||||
|
||||
type Deliver_DeliverFilteredServer interface {
|
||||
Send(*DeliverResponse) error
|
||||
Recv() (*common.Envelope, error)
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type deliverDeliverFilteredServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *deliverDeliverFilteredServer) Send(m *DeliverResponse) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *deliverDeliverFilteredServer) Recv() (*common.Envelope, error) {
|
||||
m := new(common.Envelope)
|
||||
if err := x.ServerStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func _Deliver_DeliverWithPrivateData_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
return srv.(DeliverServer).DeliverWithPrivateData(&deliverDeliverWithPrivateDataServer{stream})
|
||||
}
|
||||
|
||||
type Deliver_DeliverWithPrivateDataServer interface {
|
||||
Send(*DeliverResponse) error
|
||||
Recv() (*common.Envelope, error)
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type deliverDeliverWithPrivateDataServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *deliverDeliverWithPrivateDataServer) Send(m *DeliverResponse) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *deliverDeliverWithPrivateDataServer) Recv() (*common.Envelope, error) {
|
||||
m := new(common.Envelope)
|
||||
if err := x.ServerStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
var _Deliver_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "protos.Deliver",
|
||||
HandlerType: (*DeliverServer)(nil),
|
||||
Methods: []grpc.MethodDesc{},
|
||||
Streams: []grpc.StreamDesc{
|
||||
{
|
||||
StreamName: "Deliver",
|
||||
Handler: _Deliver_Deliver_Handler,
|
||||
ServerStreams: true,
|
||||
ClientStreams: true,
|
||||
},
|
||||
{
|
||||
StreamName: "DeliverFiltered",
|
||||
Handler: _Deliver_DeliverFiltered_Handler,
|
||||
ServerStreams: true,
|
||||
ClientStreams: true,
|
||||
},
|
||||
{
|
||||
StreamName: "DeliverWithPrivateData",
|
||||
Handler: _Deliver_DeliverWithPrivateData_Handler,
|
||||
ServerStreams: true,
|
||||
ClientStreams: true,
|
||||
},
|
||||
},
|
||||
Metadata: "peer/events.proto",
|
||||
}
|
chaincode/vendor/github.com/hyperledger/fabric-protos-go/peer/peer.pb.go (generated, vendored)
@ -0,0 +1,123 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: peer/peer.proto
|
||||
|
||||
package peer
|
||||
|
||||
import (
|
||||
context "context"
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
func init() { proto.RegisterFile("peer/peer.proto", fileDescriptor_c302117fbb08ad42) }
|
||||
|
||||
var fileDescriptor_c302117fbb08ad42 = []byte{
|
||||
// 177 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x8f, 0xb1, 0x0a, 0xc2, 0x30,
|
||||
0x10, 0x86, 0x37, 0x91, 0x2c, 0x85, 0x0a, 0x22, 0xc5, 0xc9, 0xd9, 0xa6, 0xa0, 0x6f, 0xa0, 0x38,
|
||||
0x5b, 0xea, 0xe6, 0x22, 0x6d, 0x73, 0xa6, 0x81, 0x9a, 0x0b, 0x77, 0x75, 0xf0, 0xed, 0xa5, 0xbd,
|
||||
0x06, 0x74, 0x49, 0xe0, 0xfb, 0xbf, 0x3b, 0xee, 0x57, 0x49, 0x00, 0xa0, 0x62, 0x7c, 0x74, 0x20,
|
||||
0x1c, 0x30, 0x5d, 0x4c, 0x1f, 0x67, 0x2b, 0x09, 0x08, 0x03, 0x72, 0xdd, 0x4b, 0x98, 0x6d, 0xff,
|
||||
0xe0, 0x83, 0x80, 0x03, 0x7a, 0x06, 0x49, 0x0f, 0x57, 0xb5, 0xbc, 0x78, 0x83, 0xc4, 0x40, 0xe9,
|
||||
0x59, 0x25, 0x25, 0x61, 0x0b, 0xcc, 0xe5, 0x6c, 0xa7, 0x6b, 0xd1, 0x58, 0xdf, 0x9c, 0xf5, 0x60,
|
||||
0x22, 0xcf, 0x36, 0x91, 0x47, 0x52, 0xcd, 0x6b, 0x4f, 0x95, 0xda, 0x21, 0x59, 0xdd, 0x7d, 0x02,
|
||||
0x50, 0x0f, 0xc6, 0x02, 0xe9, 0x67, 0xdd, 0x90, 0x6b, 0xe3, 0xc4, 0x78, 0xce, 0x7d, 0x6f, 0xdd,
|
||||
0xd0, 0xbd, 0x1b, 0xdd, 0xe2, 0xab, 0xf8, 0x51, 0x0b, 0x51, 0x73, 0x51, 0x73, 0x8b, 0x53, 0xcb,
|
||||
0x46, 0xfa, 0x1d, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xd5, 0x12, 0xef, 0x66, 0xf9, 0x00, 0x00,
|
||||
0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
|
||||
// EndorserClient is the client API for Endorser service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type EndorserClient interface {
|
||||
ProcessProposal(ctx context.Context, in *SignedProposal, opts ...grpc.CallOption) (*ProposalResponse, error)
|
||||
}
|
||||
|
||||
type endorserClient struct {
|
||||
cc *grpc.ClientConn
|
||||
}
|
||||
|
||||
func NewEndorserClient(cc *grpc.ClientConn) EndorserClient {
|
||||
return &endorserClient{cc}
|
||||
}
|
||||
|
||||
func (c *endorserClient) ProcessProposal(ctx context.Context, in *SignedProposal, opts ...grpc.CallOption) (*ProposalResponse, error) {
|
||||
out := new(ProposalResponse)
|
||||
err := c.cc.Invoke(ctx, "/protos.Endorser/ProcessProposal", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
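// The following is an illustrative sketch, not part of the generated file: it shows the
// unary calling pattern for EndorserClient.ProcessProposal. The caller is assumed to have
// already built and signed the SignedProposal; that construction is out of scope here.
func exampleProcessProposal(ctx context.Context, cc *grpc.ClientConn, signed *SignedProposal) (*ProposalResponse, error) {
    endorser := NewEndorserClient(cc)
    // One request/response round trip; the ProposalResponse carries the peer's
    // endorsement result (or an error status).
    return endorser.ProcessProposal(ctx, signed)
}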
|
||||
|
||||
// EndorserServer is the server API for Endorser service.
|
||||
type EndorserServer interface {
|
||||
ProcessProposal(context.Context, *SignedProposal) (*ProposalResponse, error)
|
||||
}
|
||||
|
||||
// UnimplementedEndorserServer can be embedded to have forward compatible implementations.
|
||||
type UnimplementedEndorserServer struct {
|
||||
}
|
||||
|
||||
func (*UnimplementedEndorserServer) ProcessProposal(ctx context.Context, req *SignedProposal) (*ProposalResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ProcessProposal not implemented")
|
||||
}
|
||||
|
||||
func RegisterEndorserServer(s *grpc.Server, srv EndorserServer) {
|
||||
s.RegisterService(&_Endorser_serviceDesc, srv)
|
||||
}
|
||||
|
||||
func _Endorser_ProcessProposal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(SignedProposal)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(EndorserServer).ProcessProposal(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/protos.Endorser/ProcessProposal",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(EndorserServer).ProcessProposal(ctx, req.(*SignedProposal))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
var _Endorser_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "protos.Endorser",
|
||||
HandlerType: (*EndorserServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "ProcessProposal",
|
||||
Handler: _Endorser_ProcessProposal_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "peer/peer.proto",
|
||||
}
|
@ -0,0 +1,130 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: peer/policy.proto
|
||||
|
||||
package peer
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
common "github.com/hyperledger/fabric-protos-go/common"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// ApplicationPolicy captures the different policy types that
|
||||
// are set and evaluated at the application level.
|
||||
type ApplicationPolicy struct {
|
||||
// Types that are valid to be assigned to Type:
|
||||
// *ApplicationPolicy_SignaturePolicy
|
||||
// *ApplicationPolicy_ChannelConfigPolicyReference
|
||||
Type isApplicationPolicy_Type `protobuf_oneof:"Type"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ApplicationPolicy) Reset() { *m = ApplicationPolicy{} }
|
||||
func (m *ApplicationPolicy) String() string { return proto.CompactTextString(m) }
|
||||
func (*ApplicationPolicy) ProtoMessage() {}
|
||||
func (*ApplicationPolicy) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_17aa1dd1e55c3e19, []int{0}
|
||||
}
|
||||
|
||||
func (m *ApplicationPolicy) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ApplicationPolicy.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ApplicationPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ApplicationPolicy.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ApplicationPolicy) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ApplicationPolicy.Merge(m, src)
|
||||
}
|
||||
func (m *ApplicationPolicy) XXX_Size() int {
|
||||
return xxx_messageInfo_ApplicationPolicy.Size(m)
|
||||
}
|
||||
func (m *ApplicationPolicy) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ApplicationPolicy.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ApplicationPolicy proto.InternalMessageInfo
|
||||
|
||||
type isApplicationPolicy_Type interface {
|
||||
isApplicationPolicy_Type()
|
||||
}
|
||||
|
||||
type ApplicationPolicy_SignaturePolicy struct {
|
||||
SignaturePolicy *common.SignaturePolicyEnvelope `protobuf:"bytes,1,opt,name=signature_policy,json=signaturePolicy,proto3,oneof"`
|
||||
}
|
||||
|
||||
type ApplicationPolicy_ChannelConfigPolicyReference struct {
|
||||
ChannelConfigPolicyReference string `protobuf:"bytes,2,opt,name=channel_config_policy_reference,json=channelConfigPolicyReference,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*ApplicationPolicy_SignaturePolicy) isApplicationPolicy_Type() {}
|
||||
|
||||
func (*ApplicationPolicy_ChannelConfigPolicyReference) isApplicationPolicy_Type() {}
|
||||
|
||||
func (m *ApplicationPolicy) GetType() isApplicationPolicy_Type {
|
||||
if m != nil {
|
||||
return m.Type
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ApplicationPolicy) GetSignaturePolicy() *common.SignaturePolicyEnvelope {
|
||||
if x, ok := m.GetType().(*ApplicationPolicy_SignaturePolicy); ok {
|
||||
return x.SignaturePolicy
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ApplicationPolicy) GetChannelConfigPolicyReference() string {
|
||||
if x, ok := m.GetType().(*ApplicationPolicy_ChannelConfigPolicyReference); ok {
|
||||
return x.ChannelConfigPolicyReference
|
||||
}
|
||||
return ""
|
||||
}
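// exampleApplicationPolicy is an illustrative sketch, not part of the
// generated code: it populates the Type oneof with a channel config policy
// reference and reads it back through the getters above. The policy path
// "/Channel/Application/Endorsement" is an assumption for the example.
func exampleApplicationPolicy() string {
	p := &ApplicationPolicy{
		Type: &ApplicationPolicy_ChannelConfigPolicyReference{
			ChannelConfigPolicyReference: "/Channel/Application/Endorsement",
		},
	}
	// GetSignaturePolicy returns nil because the other oneof variant is set.
	if p.GetSignaturePolicy() == nil {
		return p.GetChannelConfigPolicyReference()
	}
	return ""
}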
|
||||
|
||||
// XXX_OneofWrappers is for the internal use of the proto package.
|
||||
func (*ApplicationPolicy) XXX_OneofWrappers() []interface{} {
|
||||
return []interface{}{
|
||||
(*ApplicationPolicy_SignaturePolicy)(nil),
|
||||
(*ApplicationPolicy_ChannelConfigPolicyReference)(nil),
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*ApplicationPolicy)(nil), "protos.ApplicationPolicy")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("peer/policy.proto", fileDescriptor_17aa1dd1e55c3e19) }
|
||||
|
||||
var fileDescriptor_17aa1dd1e55c3e19 = []byte{
|
||||
// 243 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x90, 0xc1, 0x4a, 0xc3, 0x40,
|
||||
0x10, 0x86, 0x1b, 0x91, 0x82, 0xeb, 0x41, 0x1b, 0x10, 0x8a, 0x08, 0x2d, 0x3d, 0xf5, 0x60, 0x37,
|
||||
0xa0, 0x4f, 0x60, 0x45, 0xec, 0xc1, 0x83, 0x44, 0x4f, 0x5e, 0x42, 0xb2, 0x4e, 0x36, 0x0b, 0xdb,
|
||||
0x9d, 0x61, 0x36, 0x15, 0xf2, 0x5a, 0x3e, 0xa1, 0x24, 0xd3, 0x80, 0x3d, 0xed, 0xe1, 0xfb, 0xfe,
|
||||
0x9f, 0x9d, 0x5f, 0xcd, 0x08, 0x80, 0x33, 0x42, 0xef, 0x4c, 0xa7, 0x89, 0xb1, 0xc5, 0x74, 0x3a,
|
||||
0x3c, 0xf1, 0xf6, 0xc6, 0xe0, 0x7e, 0x8f, 0x41, 0xa0, 0x83, 0x28, 0x78, 0xf5, 0x9b, 0xa8, 0xd9,
|
||||
0x13, 0x91, 0x77, 0xa6, 0x6c, 0x1d, 0x86, 0xf7, 0x21, 0x9a, 0xbe, 0xa9, 0xeb, 0xe8, 0x6c, 0x28,
|
||||
0xdb, 0x03, 0x43, 0x21, 0x75, 0xf3, 0x64, 0x99, 0xac, 0x2f, 0x1f, 0x16, 0x5a, 0x7a, 0xf4, 0xc7,
|
||||
0xc8, 0x25, 0xf2, 0x12, 0x7e, 0xc0, 0x23, 0xc1, 0x6e, 0x92, 0x5f, 0xc5, 0x53, 0x94, 0xbe, 0xaa,
|
||||
0x85, 0x69, 0xca, 0x10, 0xc0, 0x17, 0x06, 0x43, 0xed, 0xec, 0xb1, 0xb2, 0x60, 0xa8, 0x81, 0x21,
|
||||
0x18, 0x98, 0x9f, 0x2d, 0x93, 0xf5, 0xc5, 0x6e, 0x92, 0xdf, 0x1d, 0xc5, 0xe7, 0xc1, 0x93, 0x7c,
|
||||
0x3e, 0x5a, 0xdb, 0xa9, 0x3a, 0xff, 0xec, 0x08, 0xb6, 0xb9, 0x5a, 0x21, 0x5b, 0xdd, 0x74, 0x04,
|
||||
0xec, 0xe1, 0xdb, 0x02, 0xeb, 0xba, 0xac, 0xd8, 0x19, 0x39, 0x2a, 0xea, 0x7e, 0x86, 0xaf, 0x7b,
|
||||
0xeb, 0xda, 0xe6, 0x50, 0xf5, 0x1f, 0xce, 0xfe, 0xa9, 0x99, 0xa8, 0x1b, 0x51, 0x37, 0x16, 0xb3,
|
||||
0xde, 0xae, 0x64, 0xa7, 0xc7, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x89, 0x76, 0x3a, 0x03, 0x43,
|
||||
0x01, 0x00, 0x00,
|
||||
}
|
@ -0,0 +1,394 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: peer/proposal.proto
|
||||
|
||||
package peer
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// This structure is necessary to sign the proposal which contains the header
|
||||
// and the payload. Without this structure, we would have to concatenate the
|
||||
// header and the payload to verify the signature, which could be expensive
|
||||
// with large payload
|
||||
//
|
||||
// When an endorser receives a SignedProposal message, it should verify the
|
||||
// signature over the proposal bytes. This verification requires the following
|
||||
// steps:
|
||||
// 1. Verification of the validity of the certificate that was used to produce
|
||||
// the signature. The certificate will be available once proposalBytes has
|
||||
// been unmarshalled to a Proposal message, and Proposal.header has been
|
||||
// unmarshalled to a Header message. While this unmarshalling-before-verifying
|
||||
// might not be ideal, it is unavoidable because i) the signature needs to also
|
||||
// protect the signing certificate; ii) it is desirable that Header is created
|
||||
// once by the client and never changed (for the sake of accountability and
|
||||
// non-repudiation). Note also that it is actually impossible to conclusively
|
||||
// verify the validity of the certificate included in a Proposal, because the
|
||||
// proposal needs to first be endorsed and ordered with respect to certificate
|
||||
// expiration transactions. Still, it is useful to pre-filter expired
|
||||
// certificates at this stage.
|
||||
// 2. Verification that the certificate is trusted (signed by a trusted CA) and
|
||||
// that it is allowed to transact with us (with respect to some ACLs);
|
||||
// 3. Verification that the signature on proposalBytes is valid;
|
||||
// 4. Detect replay attacks;
|
||||
type SignedProposal struct {
|
||||
// The bytes of Proposal
|
||||
ProposalBytes []byte `protobuf:"bytes,1,opt,name=proposal_bytes,json=proposalBytes,proto3" json:"proposal_bytes,omitempty"`
|
||||
// Signature over proposalBytes; this signature is to be verified against
|
||||
// the creator identity contained in the header of the Proposal message
|
||||
// marshaled as proposalBytes
|
||||
Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SignedProposal) Reset() { *m = SignedProposal{} }
|
||||
func (m *SignedProposal) String() string { return proto.CompactTextString(m) }
|
||||
func (*SignedProposal) ProtoMessage() {}
|
||||
func (*SignedProposal) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_c4dbb4372a94bd5b, []int{0}
|
||||
}
|
||||
|
||||
func (m *SignedProposal) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_SignedProposal.Unmarshal(m, b)
|
||||
}
|
||||
func (m *SignedProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_SignedProposal.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *SignedProposal) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_SignedProposal.Merge(m, src)
|
||||
}
|
||||
func (m *SignedProposal) XXX_Size() int {
|
||||
return xxx_messageInfo_SignedProposal.Size(m)
|
||||
}
|
||||
func (m *SignedProposal) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_SignedProposal.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_SignedProposal proto.InternalMessageInfo
|
||||
|
||||
func (m *SignedProposal) GetProposalBytes() []byte {
|
||||
if m != nil {
|
||||
return m.ProposalBytes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *SignedProposal) GetSignature() []byte {
|
||||
if m != nil {
|
||||
return m.Signature
|
||||
}
|
||||
return nil
|
||||
}
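// exampleCheckSignedProposal is an illustrative sketch, not part of the
// generated code: it follows the unmarshal-before-verify flow described in the
// SignedProposal comment above. The verify callback stands in for certificate
// validation plus signature checking against the creator identity carried in
// the proposal header; it is an assumption supplied by the caller.
func exampleCheckSignedProposal(sp *SignedProposal, verify func(prop *Proposal, msg, sig []byte) error) (*Proposal, error) {
	// The proposal bytes must be unmarshalled first, because the creator
	// certificate needed to check the signature lives in the proposal header.
	prop := &Proposal{}
	if err := proto.Unmarshal(sp.GetProposalBytes(), prop); err != nil {
		return nil, err
	}
	// verify is expected to validate the creator certificate taken from
	// prop.Header and then check sig over msg (steps 1-3 above).
	if err := verify(prop, sp.GetProposalBytes(), sp.GetSignature()); err != nil {
		return nil, err
	}
	return prop, nil
}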
|
||||
|
||||
// A Proposal is sent to an endorser for endorsement. The proposal contains:
|
||||
// 1. A header which should be unmarshaled to a Header message. Note that
|
||||
// Header is both the header of a Proposal and of a Transaction, in that i)
|
||||
// both headers should be unmarshaled to this message; and ii) it is used to
|
||||
// compute cryptographic hashes and signatures. The header has fields common
|
||||
// to all proposals/transactions. In addition it has a type field for
|
||||
// additional customization. An example of this is the ChaincodeHeaderExtension
|
||||
// message used to extend the Header for type CHAINCODE.
|
||||
// 2. A payload whose type depends on the header's type field.
|
||||
// 3. An extension whose type depends on the header's type field.
|
||||
//
|
||||
// Let us see an example. For type CHAINCODE (see the Header message),
|
||||
// we have the following:
|
||||
// 1. The header is a Header message whose extensions field is a
|
||||
// ChaincodeHeaderExtension message.
|
||||
// 2. The payload is a ChaincodeProposalPayload message.
|
||||
// 3. The extension is a ChaincodeAction that might be used to ask the
|
||||
// endorsers to endorse a specific ChaincodeAction, thus emulating the
|
||||
// submitting peer model.
|
||||
type Proposal struct {
|
||||
// The header of the proposal. It is the bytes of the Header
|
||||
Header []byte `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
|
||||
// The payload of the proposal as defined by the type in the proposal
|
||||
// header.
|
||||
Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"`
|
||||
// Optional extensions to the proposal. Its content depends on the Header's
|
||||
// type field. For the type CHAINCODE, it might be the bytes of a
|
||||
// ChaincodeAction message.
|
||||
Extension []byte `protobuf:"bytes,3,opt,name=extension,proto3" json:"extension,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Proposal) Reset() { *m = Proposal{} }
|
||||
func (m *Proposal) String() string { return proto.CompactTextString(m) }
|
||||
func (*Proposal) ProtoMessage() {}
|
||||
func (*Proposal) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_c4dbb4372a94bd5b, []int{1}
|
||||
}
|
||||
|
||||
func (m *Proposal) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Proposal.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Proposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Proposal.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Proposal) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Proposal.Merge(m, src)
|
||||
}
|
||||
func (m *Proposal) XXX_Size() int {
|
||||
return xxx_messageInfo_Proposal.Size(m)
|
||||
}
|
||||
func (m *Proposal) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Proposal.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Proposal proto.InternalMessageInfo
|
||||
|
||||
func (m *Proposal) GetHeader() []byte {
|
||||
if m != nil {
|
||||
return m.Header
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Proposal) GetPayload() []byte {
|
||||
if m != nil {
|
||||
return m.Payload
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Proposal) GetExtension() []byte {
|
||||
if m != nil {
|
||||
return m.Extension
|
||||
}
|
||||
return nil
|
||||
}
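// exampleChaincodePayload is an illustrative sketch, not part of the generated
// code: for a proposal whose header type is CHAINCODE, the payload bytes are
// expected to unmarshal into the ChaincodeProposalPayload message defined
// below. Checking the header type beforehand is left to the caller.
func exampleChaincodePayload(p *Proposal) (*ChaincodeProposalPayload, error) {
	cpp := &ChaincodeProposalPayload{}
	if err := proto.Unmarshal(p.GetPayload(), cpp); err != nil {
		return nil, err
	}
	return cpp, nil
}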
|
||||
|
||||
// ChaincodeHeaderExtension is the Header's extension message to be used when
|
||||
// the Header's type is CHAINCODE. This extension is used to specify which
|
||||
// chaincode to invoke and what should appear on the ledger.
|
||||
type ChaincodeHeaderExtension struct {
|
||||
// The ID of the chaincode to target.
|
||||
ChaincodeId *ChaincodeID `protobuf:"bytes,2,opt,name=chaincode_id,json=chaincodeId,proto3" json:"chaincode_id,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ChaincodeHeaderExtension) Reset() { *m = ChaincodeHeaderExtension{} }
|
||||
func (m *ChaincodeHeaderExtension) String() string { return proto.CompactTextString(m) }
|
||||
func (*ChaincodeHeaderExtension) ProtoMessage() {}
|
||||
func (*ChaincodeHeaderExtension) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_c4dbb4372a94bd5b, []int{2}
|
||||
}
|
||||
|
||||
func (m *ChaincodeHeaderExtension) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ChaincodeHeaderExtension.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ChaincodeHeaderExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ChaincodeHeaderExtension.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ChaincodeHeaderExtension) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ChaincodeHeaderExtension.Merge(m, src)
|
||||
}
|
||||
func (m *ChaincodeHeaderExtension) XXX_Size() int {
|
||||
return xxx_messageInfo_ChaincodeHeaderExtension.Size(m)
|
||||
}
|
||||
func (m *ChaincodeHeaderExtension) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ChaincodeHeaderExtension.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ChaincodeHeaderExtension proto.InternalMessageInfo
|
||||
|
||||
func (m *ChaincodeHeaderExtension) GetChaincodeId() *ChaincodeID {
|
||||
if m != nil {
|
||||
return m.ChaincodeId
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ChaincodeProposalPayload is the Proposal's payload message to be used when
|
||||
// the Header's type is CHAINCODE. It contains the arguments for this
|
||||
// invocation.
|
||||
type ChaincodeProposalPayload struct {
|
||||
// Input contains the arguments for this invocation. If this invocation
|
||||
// deploys a new chaincode, ESCC/VSCC are part of this field.
|
||||
// This is usually a marshaled ChaincodeInvocationSpec
|
||||
Input []byte `protobuf:"bytes,1,opt,name=input,proto3" json:"input,omitempty"`
|
||||
// TransientMap contains data (e.g. cryptographic material) that might be used
|
||||
// to implement some form of application-level confidentiality. The contents
|
||||
// of this field are supposed to always be omitted from the transaction and
|
||||
// excluded from the ledger.
|
||||
TransientMap map[string][]byte `protobuf:"bytes,2,rep,name=TransientMap,proto3" json:"TransientMap,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ChaincodeProposalPayload) Reset() { *m = ChaincodeProposalPayload{} }
|
||||
func (m *ChaincodeProposalPayload) String() string { return proto.CompactTextString(m) }
|
||||
func (*ChaincodeProposalPayload) ProtoMessage() {}
|
||||
func (*ChaincodeProposalPayload) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_c4dbb4372a94bd5b, []int{3}
|
||||
}
|
||||
|
||||
func (m *ChaincodeProposalPayload) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ChaincodeProposalPayload.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ChaincodeProposalPayload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ChaincodeProposalPayload.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ChaincodeProposalPayload) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ChaincodeProposalPayload.Merge(m, src)
|
||||
}
|
||||
func (m *ChaincodeProposalPayload) XXX_Size() int {
|
||||
return xxx_messageInfo_ChaincodeProposalPayload.Size(m)
|
||||
}
|
||||
func (m *ChaincodeProposalPayload) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ChaincodeProposalPayload.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ChaincodeProposalPayload proto.InternalMessageInfo
|
||||
|
||||
func (m *ChaincodeProposalPayload) GetInput() []byte {
|
||||
if m != nil {
|
||||
return m.Input
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ChaincodeProposalPayload) GetTransientMap() map[string][]byte {
|
||||
if m != nil {
|
||||
return m.TransientMap
|
||||
}
|
||||
return nil
|
||||
}
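// exampleTransientPayload is an illustrative sketch, not part of the generated
// code: it builds a ChaincodeProposalPayload whose TransientMap carries data
// that is meant to stay out of the transaction and the ledger, as the comment
// above explains. The key name "encryption-key" and the marshaled invocation
// spec passed in as Input are assumptions for the example.
func exampleTransientPayload(invocationSpec, secret []byte) *ChaincodeProposalPayload {
	return &ChaincodeProposalPayload{
		Input: invocationSpec,
		TransientMap: map[string][]byte{
			"encryption-key": secret,
		},
	}
}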
|
||||
|
||||
// ChaincodeAction contains the executed chaincode results, response, and event.
|
||||
type ChaincodeAction struct {
|
||||
// This field contains the read set and the write set produced by the
|
||||
// chaincode executing this invocation.
|
||||
Results []byte `protobuf:"bytes,1,opt,name=results,proto3" json:"results,omitempty"`
|
||||
// This field contains the event generated by the chaincode.
|
||||
// Only a single marshaled ChaincodeEvent is included.
|
||||
Events []byte `protobuf:"bytes,2,opt,name=events,proto3" json:"events,omitempty"`
|
||||
// This field contains the result of executing this invocation.
|
||||
Response *Response `protobuf:"bytes,3,opt,name=response,proto3" json:"response,omitempty"`
|
||||
// This field contains the ChaincodeID of the chaincode executing this invocation. The endorser
|
||||
// sets it to the ChaincodeID it invoked while simulating the proposal.
|
||||
// The committer validates that this version matches the latest chaincode version.
|
||||
// Adding ChaincodeID to keep version opens up the possibility of multiple
|
||||
// ChaincodeAction per transaction.
|
||||
ChaincodeId *ChaincodeID `protobuf:"bytes,4,opt,name=chaincode_id,json=chaincodeId,proto3" json:"chaincode_id,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ChaincodeAction) Reset() { *m = ChaincodeAction{} }
|
||||
func (m *ChaincodeAction) String() string { return proto.CompactTextString(m) }
|
||||
func (*ChaincodeAction) ProtoMessage() {}
|
||||
func (*ChaincodeAction) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_c4dbb4372a94bd5b, []int{4}
|
||||
}
|
||||
|
||||
func (m *ChaincodeAction) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ChaincodeAction.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ChaincodeAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ChaincodeAction.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ChaincodeAction) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ChaincodeAction.Merge(m, src)
|
||||
}
|
||||
func (m *ChaincodeAction) XXX_Size() int {
|
||||
return xxx_messageInfo_ChaincodeAction.Size(m)
|
||||
}
|
||||
func (m *ChaincodeAction) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ChaincodeAction.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ChaincodeAction proto.InternalMessageInfo
|
||||
|
||||
func (m *ChaincodeAction) GetResults() []byte {
|
||||
if m != nil {
|
||||
return m.Results
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ChaincodeAction) GetEvents() []byte {
|
||||
if m != nil {
|
||||
return m.Events
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ChaincodeAction) GetResponse() *Response {
|
||||
if m != nil {
|
||||
return m.Response
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ChaincodeAction) GetChaincodeId() *ChaincodeID {
|
||||
if m != nil {
|
||||
return m.ChaincodeId
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*SignedProposal)(nil), "protos.SignedProposal")
|
||||
proto.RegisterType((*Proposal)(nil), "protos.Proposal")
|
||||
proto.RegisterType((*ChaincodeHeaderExtension)(nil), "protos.ChaincodeHeaderExtension")
|
||||
proto.RegisterType((*ChaincodeProposalPayload)(nil), "protos.ChaincodeProposalPayload")
|
||||
proto.RegisterMapType((map[string][]byte)(nil), "protos.ChaincodeProposalPayload.TransientMapEntry")
|
||||
proto.RegisterType((*ChaincodeAction)(nil), "protos.ChaincodeAction")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("peer/proposal.proto", fileDescriptor_c4dbb4372a94bd5b) }
|
||||
|
||||
var fileDescriptor_c4dbb4372a94bd5b = []byte{
|
||||
// 462 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcf, 0x6b, 0xdb, 0x30,
|
||||
0x18, 0xc5, 0x69, 0x9b, 0xa6, 0x5f, 0xb2, 0xd6, 0x75, 0xcb, 0x30, 0xa1, 0x87, 0x62, 0x18, 0xf4,
|
||||
0xd0, 0x3a, 0x90, 0xc1, 0x18, 0xbb, 0x8c, 0x65, 0x2b, 0xac, 0x83, 0x41, 0xf1, 0x7e, 0x1c, 0x7a,
|
||||
0x09, 0xb2, 0xfd, 0xcd, 0x11, 0xf1, 0x24, 0x21, 0xc9, 0x61, 0xfe, 0xf3, 0x76, 0xdc, 0x7f, 0x35,
|
||||
0x64, 0x49, 0x6e, 0xba, 0x5c, 0x76, 0x4a, 0xbe, 0x1f, 0xef, 0xe9, 0x3d, 0x3d, 0x19, 0xce, 0x04,
|
||||
0xa2, 0x9c, 0x09, 0xc9, 0x05, 0x57, 0xa4, 0x4e, 0x85, 0xe4, 0x9a, 0x47, 0xc3, 0xee, 0x47, 0x4d,
|
||||
0xcf, 0xbb, 0x61, 0xb1, 0x22, 0x94, 0x15, 0xbc, 0x44, 0x3b, 0x9d, 0x5e, 0x3c, 0x81, 0x2c, 0x25,
|
||||
0x2a, 0xc1, 0x99, 0x72, 0xd3, 0xe4, 0x1b, 0x1c, 0x7f, 0xa1, 0x15, 0xc3, 0xf2, 0xde, 0x2d, 0x44,
|
||||
0x2f, 0xe0, 0xb8, 0x5f, 0xce, 0x5b, 0x8d, 0x2a, 0x0e, 0x2e, 0x83, 0xab, 0x49, 0xf6, 0xcc, 0x77,
|
||||
0x17, 0xa6, 0x19, 0x5d, 0xc0, 0x91, 0xa2, 0x15, 0x23, 0xba, 0x91, 0x18, 0x0f, 0xba, 0x8d, 0xc7,
|
||||
0x46, 0xf2, 0x00, 0xa3, 0x9e, 0xf0, 0x39, 0x0c, 0x57, 0x48, 0x4a, 0x94, 0x8e, 0xc8, 0x55, 0x51,
|
||||
0x0c, 0x87, 0x82, 0xb4, 0x35, 0x27, 0xa5, 0xc3, 0xfb, 0xd2, 0x70, 0xe3, 0x2f, 0x8d, 0x4c, 0x51,
|
||||
0xce, 0xe2, 0x3d, 0xcb, 0xdd, 0x37, 0x92, 0x35, 0xc4, 0xef, 0xbd, 0xc7, 0x8f, 0x1d, 0xd5, 0xad,
|
||||
0x9f, 0x45, 0xaf, 0x60, 0xd2, 0xfb, 0x5f, 0x52, 0x4b, 0x3c, 0x9e, 0x9f, 0x59, 0xb3, 0x2a, 0xed,
|
||||
0x71, 0x77, 0x1f, 0xb2, 0x71, 0xbf, 0x78, 0x57, 0x7e, 0xda, 0x1f, 0x05, 0xe1, 0x20, 0x3b, 0x75,
|
||||
0x02, 0x96, 0x1b, 0xaa, 0x72, 0x5a, 0x53, 0xdd, 0x26, 0x7f, 0x82, 0xad, 0xd3, 0xbc, 0xa5, 0x7b,
|
||||
0xa7, 0xf3, 0x1c, 0x0e, 0x28, 0x13, 0x8d, 0x76, 0xc6, 0x6c, 0x11, 0x7d, 0x87, 0xc9, 0x57, 0x49,
|
||||
0x98, 0xa2, 0xc8, 0xf4, 0x67, 0x22, 0xe2, 0xc1, 0xe5, 0xde, 0xd5, 0x78, 0x3e, 0xdf, 0xd1, 0xf0,
|
||||
0x0f, 0x5b, 0xba, 0x0d, 0xba, 0x65, 0x5a, 0xb6, 0xd9, 0x13, 0x9e, 0xe9, 0x5b, 0x38, 0xdd, 0x59,
|
||||
0x89, 0x42, 0xd8, 0x5b, 0x63, 0xdb, 0x09, 0x38, 0xca, 0xcc, 0x5f, 0x23, 0x6a, 0x43, 0xea, 0xc6,
|
||||
0x87, 0x62, 0x8b, 0x37, 0x83, 0xd7, 0x41, 0xf2, 0x3b, 0x80, 0x93, 0xfe, 0xf4, 0x77, 0x85, 0x36,
|
||||
0x17, 0x16, 0xc3, 0xa1, 0x44, 0xd5, 0xd4, 0xda, 0xc7, 0xec, 0x4b, 0x13, 0x1b, 0x6e, 0x90, 0x69,
|
||||
0xe5, 0x88, 0x5c, 0x15, 0x5d, 0xc3, 0xc8, 0xbf, 0xa1, 0x2e, 0x9b, 0xf1, 0x3c, 0xf4, 0xd6, 0x32,
|
||||
0xd7, 0xcf, 0xfa, 0x8d, 0x9d, 0x40, 0xf6, 0xff, 0x3b, 0x90, 0x83, 0x70, 0x98, 0x85, 0x9a, 0xaf,
|
||||
0x91, 0x2d, 0xb9, 0x40, 0x49, 0x8c, 0x5c, 0xb5, 0x28, 0x20, 0xe1, 0xb2, 0x4a, 0x57, 0xad, 0x40,
|
||||
0x59, 0x63, 0x59, 0xa1, 0x4c, 0x7f, 0x90, 0x5c, 0xd2, 0xc2, 0x33, 0x9a, 0xd7, 0xbe, 0x38, 0x79,
|
||||
0xbc, 0xdb, 0x62, 0x4d, 0x2a, 0x7c, 0xb8, 0xae, 0xa8, 0x5e, 0x35, 0x79, 0x5a, 0xf0, 0x9f, 0xb3,
|
||||
0x2d, 0xec, 0xcc, 0x62, 0x6f, 0x2c, 0xf6, 0xa6, 0xe2, 0x33, 0x03, 0xcf, 0xed, 0x07, 0xf5, 0xf2,
|
||||
0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe5, 0xf4, 0xc9, 0x9a, 0x6e, 0x03, 0x00, 0x00,
|
||||
}
|
chaincode/vendor/github.com/hyperledger/fabric-protos-go/peer/proposal_response.pb.go (generated, vendored, 327 lines)
@ -0,0 +1,327 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: peer/proposal_response.proto
|
||||
|
||||
package peer
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
timestamp "github.com/golang/protobuf/ptypes/timestamp"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// A ProposalResponse is returned from an endorser to the proposal submitter.
|
||||
// The idea is that this message contains the endorser's response to the
|
||||
// request of a client to perform an action over a chaincode (or more
|
||||
// generically on the ledger); the response might be success/error (conveyed in
|
||||
// the Response field) together with a description of the action and a
|
||||
// signature over it by that endorser. If a sufficient number of distinct
|
||||
// endorsers agree on the same action and produce signature to that effect, a
|
||||
// transaction can be generated and sent for ordering.
|
||||
type ProposalResponse struct {
|
||||
// Version indicates message protocol version
|
||||
Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"`
|
||||
// Timestamp is the time that the message
|
||||
// was created as defined by the sender
|
||||
Timestamp *timestamp.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
|
||||
// A response message indicating whether the
|
||||
// endorsement of the action was successful
|
||||
Response *Response `protobuf:"bytes,4,opt,name=response,proto3" json:"response,omitempty"`
|
||||
// The payload of response. It is the bytes of ProposalResponsePayload
|
||||
Payload []byte `protobuf:"bytes,5,opt,name=payload,proto3" json:"payload,omitempty"`
|
||||
// The endorsement of the proposal, basically
|
||||
// the endorser's signature over the payload
|
||||
Endorsement *Endorsement `protobuf:"bytes,6,opt,name=endorsement,proto3" json:"endorsement,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ProposalResponse) Reset() { *m = ProposalResponse{} }
|
||||
func (m *ProposalResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ProposalResponse) ProtoMessage() {}
|
||||
func (*ProposalResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_2ed51030656d961a, []int{0}
|
||||
}
|
||||
|
||||
func (m *ProposalResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ProposalResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ProposalResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ProposalResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ProposalResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ProposalResponse.Merge(m, src)
|
||||
}
|
||||
func (m *ProposalResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_ProposalResponse.Size(m)
|
||||
}
|
||||
func (m *ProposalResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ProposalResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ProposalResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *ProposalResponse) GetVersion() int32 {
|
||||
if m != nil {
|
||||
return m.Version
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ProposalResponse) GetTimestamp() *timestamp.Timestamp {
|
||||
if m != nil {
|
||||
return m.Timestamp
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ProposalResponse) GetResponse() *Response {
|
||||
if m != nil {
|
||||
return m.Response
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ProposalResponse) GetPayload() []byte {
|
||||
if m != nil {
|
||||
return m.Payload
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ProposalResponse) GetEndorsement() *Endorsement {
|
||||
if m != nil {
|
||||
return m.Endorsement
|
||||
}
|
||||
return nil
|
||||
}
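// exampleEndorsementOK is an illustrative sketch, not part of the generated
// code: a client collecting endorsements might treat a ProposalResponse as
// successful when its status follows the HTTP convention described for the
// Response message below. Treating 200..399 as success is an assumption.
func exampleEndorsementOK(pr *ProposalResponse) (*Endorsement, bool) {
	if pr.GetResponse() == nil {
		return nil, false
	}
	status := pr.GetResponse().GetStatus()
	if status < 200 || status >= 400 {
		return nil, false
	}
	return pr.GetEndorsement(), true
}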
|
||||
|
||||
// A response with a representation similar to an HTTP response that can
|
||||
// be used within another message.
|
||||
type Response struct {
|
||||
// A status code that should follow the HTTP status codes.
|
||||
Status int32 `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"`
|
||||
// A message associated with the response code.
|
||||
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
|
||||
// A payload that can be used to include metadata with this response.
|
||||
Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Response) Reset() { *m = Response{} }
|
||||
func (m *Response) String() string { return proto.CompactTextString(m) }
|
||||
func (*Response) ProtoMessage() {}
|
||||
func (*Response) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_2ed51030656d961a, []int{1}
|
||||
}
|
||||
|
||||
func (m *Response) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Response.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Response.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Response) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Response.Merge(m, src)
|
||||
}
|
||||
func (m *Response) XXX_Size() int {
|
||||
return xxx_messageInfo_Response.Size(m)
|
||||
}
|
||||
func (m *Response) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Response.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Response proto.InternalMessageInfo
|
||||
|
||||
func (m *Response) GetStatus() int32 {
|
||||
if m != nil {
|
||||
return m.Status
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Response) GetMessage() string {
|
||||
if m != nil {
|
||||
return m.Message
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Response) GetPayload() []byte {
|
||||
if m != nil {
|
||||
return m.Payload
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProposalResponsePayload is the payload of a proposal response. This message
|
||||
// is the "bridge" between the client's request and the endorser's action in
|
||||
// response to that request. Concretely, for chaincodes, it contains a hashed
|
||||
// representation of the proposal (proposalHash) and a representation of the
|
||||
// chaincode state changes and events inside the extension field.
|
||||
type ProposalResponsePayload struct {
|
||||
// Hash of the proposal that triggered this response. The hash is used to
|
||||
// link a response with its proposal, both for bookkeeping purposes on an
|
||||
// asynchronous system and for security reasons (accountability,
|
||||
// non-repudiation). The hash usually covers the entire Proposal message
|
||||
// (byte-by-byte).
|
||||
ProposalHash []byte `protobuf:"bytes,1,opt,name=proposal_hash,json=proposalHash,proto3" json:"proposal_hash,omitempty"`
|
||||
// Extension should be unmarshaled to a type-specific message. The type of
|
||||
// the extension in any proposal response depends on the type of the proposal
|
||||
// that the client selected when the proposal was initially sent out. In
|
||||
// particular, this information is stored in the type field of a Header. For
|
||||
// chaincode, it's a ChaincodeAction message
|
||||
Extension []byte `protobuf:"bytes,2,opt,name=extension,proto3" json:"extension,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ProposalResponsePayload) Reset() { *m = ProposalResponsePayload{} }
|
||||
func (m *ProposalResponsePayload) String() string { return proto.CompactTextString(m) }
|
||||
func (*ProposalResponsePayload) ProtoMessage() {}
|
||||
func (*ProposalResponsePayload) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_2ed51030656d961a, []int{2}
|
||||
}
|
||||
|
||||
func (m *ProposalResponsePayload) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ProposalResponsePayload.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ProposalResponsePayload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ProposalResponsePayload.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ProposalResponsePayload) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ProposalResponsePayload.Merge(m, src)
|
||||
}
|
||||
func (m *ProposalResponsePayload) XXX_Size() int {
|
||||
return xxx_messageInfo_ProposalResponsePayload.Size(m)
|
||||
}
|
||||
func (m *ProposalResponsePayload) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ProposalResponsePayload.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ProposalResponsePayload proto.InternalMessageInfo
|
||||
|
||||
func (m *ProposalResponsePayload) GetProposalHash() []byte {
|
||||
if m != nil {
|
||||
return m.ProposalHash
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ProposalResponsePayload) GetExtension() []byte {
|
||||
if m != nil {
|
||||
return m.Extension
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// An endorsement is a signature of an endorser over a proposal response. By
|
||||
// producing an endorsement message, an endorser implicitly "approves" that
|
||||
// proposal response and the actions contained therein. When enough
|
||||
// endorsements have been collected, a transaction can be generated out of a
|
||||
// set of proposal responses. Note that this message only contains an identity
|
||||
// and a signature but no signed payload. This is intentional because
|
||||
// endorsements are supposed to be collected in a transaction, and they are all
|
||||
// expected to endorse a single proposal response/action (many endorsements
|
||||
// over a single proposal response)
|
||||
type Endorsement struct {
|
||||
// Identity of the endorser (e.g. its certificate)
|
||||
Endorser []byte `protobuf:"bytes,1,opt,name=endorser,proto3" json:"endorser,omitempty"`
|
||||
// Signature of the payload included in ProposalResponse concatenated with
|
||||
// the endorser's certificate; ie, sign(ProposalResponse.payload + endorser)
|
||||
Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Endorsement) Reset() { *m = Endorsement{} }
|
||||
func (m *Endorsement) String() string { return proto.CompactTextString(m) }
|
||||
func (*Endorsement) ProtoMessage() {}
|
||||
func (*Endorsement) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_2ed51030656d961a, []int{3}
|
||||
}
|
||||
|
||||
func (m *Endorsement) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Endorsement.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Endorsement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Endorsement.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Endorsement) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Endorsement.Merge(m, src)
|
||||
}
|
||||
func (m *Endorsement) XXX_Size() int {
|
||||
return xxx_messageInfo_Endorsement.Size(m)
|
||||
}
|
||||
func (m *Endorsement) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Endorsement.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Endorsement proto.InternalMessageInfo
|
||||
|
||||
func (m *Endorsement) GetEndorser() []byte {
|
||||
if m != nil {
|
||||
return m.Endorser
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Endorsement) GetSignature() []byte {
|
||||
if m != nil {
|
||||
return m.Signature
|
||||
}
|
||||
return nil
|
||||
}
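// exampleEndorsementMessage is an illustrative sketch, not part of the
// generated code: it assembles the byte string the Endorsement signature is
// described as covering above, i.e. the ProposalResponse payload concatenated
// with the endorser's identity bytes. Verifying the signature with the
// endorser's public key is left to the caller.
func exampleEndorsementMessage(pr *ProposalResponse, e *Endorsement) []byte {
	msg := make([]byte, 0, len(pr.GetPayload())+len(e.GetEndorser()))
	msg = append(msg, pr.GetPayload()...)
	msg = append(msg, e.GetEndorser()...)
	return msg
}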
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*ProposalResponse)(nil), "protos.ProposalResponse")
|
||||
proto.RegisterType((*Response)(nil), "protos.Response")
|
||||
proto.RegisterType((*ProposalResponsePayload)(nil), "protos.ProposalResponsePayload")
|
||||
proto.RegisterType((*Endorsement)(nil), "protos.Endorsement")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("peer/proposal_response.proto", fileDescriptor_2ed51030656d961a) }
|
||||
|
||||
var fileDescriptor_2ed51030656d961a = []byte{
|
||||
// 371 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0x41, 0x6b, 0xe3, 0x30,
|
||||
0x10, 0x85, 0x71, 0x76, 0x93, 0x4d, 0x94, 0x2c, 0x04, 0x2f, 0xec, 0x9a, 0x10, 0xd8, 0xe0, 0x5e,
|
||||
0x72, 0x48, 0x64, 0x68, 0x29, 0xf4, 0x1c, 0x28, 0xed, 0x31, 0x88, 0xd2, 0x43, 0x29, 0x14, 0x39,
|
||||
0x99, 0xc8, 0x26, 0xb6, 0x25, 0x34, 0x72, 0x69, 0x7e, 0x70, 0xff, 0x47, 0xb1, 0x6c, 0x39, 0x6e,
|
||||
0xe9, 0xc9, 0xbc, 0xf1, 0xd3, 0x37, 0xf3, 0x46, 0x22, 0x73, 0x05, 0xa0, 0x23, 0xa5, 0xa5, 0x92,
|
||||
0xc8, 0xb3, 0x17, 0x0d, 0xa8, 0x64, 0x81, 0x40, 0x95, 0x96, 0x46, 0xfa, 0x03, 0xfb, 0xc1, 0xd9,
|
||||
0x7f, 0x21, 0xa5, 0xc8, 0x20, 0xb2, 0x32, 0x2e, 0x0f, 0x91, 0x49, 0x73, 0x40, 0xc3, 0x73, 0x55,
|
||||
0x1b, 0xc3, 0x77, 0x8f, 0x4c, 0xb7, 0x0d, 0x84, 0x35, 0x0c, 0x3f, 0x20, 0xbf, 0x5e, 0x41, 0x63,
|
||||
0x2a, 0x8b, 0xc0, 0x5b, 0x78, 0xcb, 0x3e, 0x73, 0xd2, 0xbf, 0x21, 0xa3, 0x96, 0x10, 0xf4, 0x16,
|
||||
0xde, 0x72, 0x7c, 0x39, 0xa3, 0x75, 0x0f, 0xea, 0x7a, 0xd0, 0x07, 0xe7, 0x60, 0x67, 0xb3, 0xbf,
|
||||
0x22, 0x43, 0x37, 0x63, 0xf0, 0xd3, 0x1e, 0x9c, 0xd6, 0x27, 0x90, 0xba, 0xbe, 0xac, 0x75, 0x54,
|
||||
0x13, 0x28, 0x7e, 0xca, 0x24, 0xdf, 0x07, 0xfd, 0x85, 0xb7, 0x9c, 0x30, 0x27, 0xfd, 0x6b, 0x32,
|
||||
0x86, 0x62, 0x2f, 0x35, 0x42, 0x0e, 0x85, 0x09, 0x06, 0x16, 0xf5, 0xc7, 0xa1, 0x6e, 0xcf, 0xbf,
|
||||
0x58, 0xd7, 0x17, 0x3e, 0x92, 0x61, 0x1b, 0xef, 0x2f, 0x19, 0xa0, 0xe1, 0xa6, 0xc4, 0x26, 0x5d,
|
||||
0xa3, 0xaa, 0xa6, 0x39, 0x20, 0x72, 0x01, 0x36, 0xda, 0x88, 0x39, 0xd9, 0x1d, 0xe7, 0xc7, 0xa7,
|
||||
0x71, 0xc2, 0x67, 0xf2, 0xef, 0xeb, 0xfa, 0xb6, 0xcd, 0xa4, 0x17, 0xe4, 0x77, 0x7b, 0x3d, 0x09,
|
||||
0xc7, 0xc4, 0x76, 0x9b, 0xb0, 0x89, 0x2b, 0xde, 0x73, 0x4c, 0xfc, 0x39, 0x19, 0xc1, 0x9b, 0x81,
|
||||
0xc2, 0x2e, 0xbb, 0x67, 0x0d, 0xe7, 0x42, 0x78, 0x47, 0xc6, 0x9d, 0x44, 0xfe, 0x8c, 0x0c, 0x9b,
|
||||
0x4c, 0xba, 0x81, 0xb5, 0xba, 0x02, 0x61, 0x2a, 0x0a, 0x6e, 0x4a, 0x0d, 0x0e, 0xd4, 0x16, 0x36,
|
||||
0x47, 0x12, 0x4a, 0x2d, 0x68, 0x72, 0x52, 0xa0, 0x33, 0xd8, 0x0b, 0xd0, 0xf4, 0xc0, 0x63, 0x9d,
|
||||
0xee, 0xdc, 0xe2, 0xaa, 0xd7, 0xb4, 0xf9, 0x26, 0xca, 0xee, 0xc8, 0x05, 0x3c, 0xad, 0x44, 0x6a,
|
||||
0x92, 0x32, 0xa6, 0x3b, 0x99, 0x47, 0x1d, 0x46, 0x54, 0x33, 0xd6, 0x35, 0x63, 0x2d, 0x64, 0x54,
|
||||
0x61, 0xe2, 0xfa, 0xf1, 0x5d, 0x7d, 0x04, 0x00, 0x00, 0xff, 0xff, 0xbf, 0xd6, 0x97, 0x69, 0xa3,
|
||||
0x02, 0x00, 0x00,
|
||||
}
|
@ -0,0 +1,333 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: peer/query.proto
|
||||
|
||||
package peer
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// ChaincodeQueryResponse returns information about each chaincode that pertains
|
||||
// to a query in lscc.go, such as GetChaincodes (returns all chaincodes
|
||||
// instantiated on a channel), and GetInstalledChaincodes (returns all chaincodes
|
||||
// installed on a peer)
|
||||
type ChaincodeQueryResponse struct {
|
||||
Chaincodes []*ChaincodeInfo `protobuf:"bytes,1,rep,name=chaincodes,proto3" json:"chaincodes,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ChaincodeQueryResponse) Reset() { *m = ChaincodeQueryResponse{} }
|
||||
func (m *ChaincodeQueryResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ChaincodeQueryResponse) ProtoMessage() {}
|
||||
func (*ChaincodeQueryResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_d45bcf7fe2423301, []int{0}
|
||||
}
|
||||
|
||||
func (m *ChaincodeQueryResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ChaincodeQueryResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ChaincodeQueryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ChaincodeQueryResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ChaincodeQueryResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ChaincodeQueryResponse.Merge(m, src)
|
||||
}
|
||||
func (m *ChaincodeQueryResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_ChaincodeQueryResponse.Size(m)
|
||||
}
|
||||
func (m *ChaincodeQueryResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ChaincodeQueryResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ChaincodeQueryResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *ChaincodeQueryResponse) GetChaincodes() []*ChaincodeInfo {
|
||||
if m != nil {
|
||||
return m.Chaincodes
|
||||
}
|
||||
return nil
|
||||
}
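// exampleChaincodeNames is an illustrative sketch, not part of the generated
// code: it flattens a ChaincodeQueryResponse, as returned by queries such as
// GetChaincodes or GetInstalledChaincodes, into "name:version" strings for
// display.
func exampleChaincodeNames(r *ChaincodeQueryResponse) []string {
	names := make([]string, 0, len(r.GetChaincodes()))
	for _, cc := range r.GetChaincodes() {
		names = append(names, cc.GetName()+":"+cc.GetVersion())
	}
	return names
}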
|
||||
|
||||
// ChaincodeInfo contains general information about an installed/instantiated
|
||||
// chaincode
|
||||
type ChaincodeInfo struct {
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
|
||||
// the path as specified by the install/instantiate transaction
|
||||
Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"`
|
||||
// the chaincode function upon instantiation and its arguments. This will be
|
||||
// blank if the query is returning information about installed chaincodes.
|
||||
Input string `protobuf:"bytes,4,opt,name=input,proto3" json:"input,omitempty"`
|
||||
// the name of the ESCC for this chaincode. This will be
|
||||
// blank if the query is returning information about installed chaincodes.
|
||||
Escc string `protobuf:"bytes,5,opt,name=escc,proto3" json:"escc,omitempty"`
|
||||
// the name of the VSCC for this chaincode. This will be
|
||||
// blank if the query is returning information about installed chaincodes.
|
||||
Vscc string `protobuf:"bytes,6,opt,name=vscc,proto3" json:"vscc,omitempty"`
|
||||
// the chaincode unique id.
|
||||
// computed as: H(
|
||||
// H(name || version) ||
|
||||
// H(CodePackage)
|
||||
// )
|
||||
Id []byte `protobuf:"bytes,7,opt,name=id,proto3" json:"id,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ChaincodeInfo) Reset() { *m = ChaincodeInfo{} }
|
||||
func (m *ChaincodeInfo) String() string { return proto.CompactTextString(m) }
|
||||
func (*ChaincodeInfo) ProtoMessage() {}
|
||||
func (*ChaincodeInfo) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_d45bcf7fe2423301, []int{1}
|
||||
}
|
||||
|
||||
func (m *ChaincodeInfo) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ChaincodeInfo.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ChaincodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ChaincodeInfo.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ChaincodeInfo) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ChaincodeInfo.Merge(m, src)
|
||||
}
|
||||
func (m *ChaincodeInfo) XXX_Size() int {
|
||||
return xxx_messageInfo_ChaincodeInfo.Size(m)
|
||||
}
|
||||
func (m *ChaincodeInfo) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ChaincodeInfo.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ChaincodeInfo proto.InternalMessageInfo
|
||||
|
||||
func (m *ChaincodeInfo) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ChaincodeInfo) GetVersion() string {
|
||||
if m != nil {
|
||||
return m.Version
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ChaincodeInfo) GetPath() string {
|
||||
if m != nil {
|
||||
return m.Path
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ChaincodeInfo) GetInput() string {
|
||||
if m != nil {
|
||||
return m.Input
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ChaincodeInfo) GetEscc() string {
|
||||
if m != nil {
|
||||
return m.Escc
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ChaincodeInfo) GetVscc() string {
|
||||
if m != nil {
|
||||
return m.Vscc
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ChaincodeInfo) GetId() []byte {
|
||||
if m != nil {
|
||||
return m.Id
|
||||
}
|
||||
return nil
|
||||
}
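// exampleChaincodeId is an illustrative sketch, not part of the generated
// code: it mirrors the id construction described in the comment above,
// H(H(name || version) || H(CodePackage)). The concrete hash function H is not
// stated here, so it is taken as a parameter supplied by the caller.
func exampleChaincodeId(h func([]byte) []byte, name, version string, codePackage []byte) []byte {
	inner := h([]byte(name + version))
	outer := append(append([]byte{}, inner...), h(codePackage)...)
	return h(outer)
}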
|
||||
|
||||
// ChannelQueryResponse returns information about each channel that pertains
|
||||
// to a query in lscc.go, such as GetChannels (returns all channels for a
|
||||
// given peer)
|
||||
type ChannelQueryResponse struct {
|
||||
Channels []*ChannelInfo `protobuf:"bytes,1,rep,name=channels,proto3" json:"channels,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ChannelQueryResponse) Reset() { *m = ChannelQueryResponse{} }
|
||||
func (m *ChannelQueryResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ChannelQueryResponse) ProtoMessage() {}
|
||||
func (*ChannelQueryResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_d45bcf7fe2423301, []int{2}
|
||||
}
|
||||
|
||||
func (m *ChannelQueryResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ChannelQueryResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ChannelQueryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ChannelQueryResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ChannelQueryResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ChannelQueryResponse.Merge(m, src)
|
||||
}
|
||||
func (m *ChannelQueryResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_ChannelQueryResponse.Size(m)
|
||||
}
|
||||
func (m *ChannelQueryResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ChannelQueryResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ChannelQueryResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *ChannelQueryResponse) GetChannels() []*ChannelInfo {
|
||||
if m != nil {
|
||||
return m.Channels
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ChannelInfo contains general information about channels
|
||||
type ChannelInfo struct {
|
||||
ChannelId string `protobuf:"bytes,1,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ChannelInfo) Reset() { *m = ChannelInfo{} }
|
||||
func (m *ChannelInfo) String() string { return proto.CompactTextString(m) }
|
||||
func (*ChannelInfo) ProtoMessage() {}
|
||||
func (*ChannelInfo) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_d45bcf7fe2423301, []int{3}
|
||||
}
|
||||
|
||||
func (m *ChannelInfo) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ChannelInfo.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ChannelInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ChannelInfo.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ChannelInfo) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ChannelInfo.Merge(m, src)
|
||||
}
|
||||
func (m *ChannelInfo) XXX_Size() int {
|
||||
return xxx_messageInfo_ChannelInfo.Size(m)
|
||||
}
|
||||
func (m *ChannelInfo) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ChannelInfo.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ChannelInfo proto.InternalMessageInfo
|
||||
|
||||
func (m *ChannelInfo) GetChannelId() string {
|
||||
if m != nil {
|
||||
return m.ChannelId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// JoinBySnapshotStatus contains information about whether or not a JoinBySnapshot operation
|
||||
// is in progress and the related bootstrap dir if it is running.
|
||||
type JoinBySnapshotStatus struct {
|
||||
InProgress bool `protobuf:"varint,1,opt,name=in_progress,json=inProgress,proto3" json:"in_progress,omitempty"`
|
||||
BootstrappingSnapshotDir string `protobuf:"bytes,2,opt,name=bootstrapping_snapshot_dir,json=bootstrappingSnapshotDir,proto3" json:"bootstrapping_snapshot_dir,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *JoinBySnapshotStatus) Reset() { *m = JoinBySnapshotStatus{} }
|
||||
func (m *JoinBySnapshotStatus) String() string { return proto.CompactTextString(m) }
|
||||
func (*JoinBySnapshotStatus) ProtoMessage() {}
|
||||
func (*JoinBySnapshotStatus) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_d45bcf7fe2423301, []int{4}
|
||||
}
|
||||
|
||||
func (m *JoinBySnapshotStatus) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_JoinBySnapshotStatus.Unmarshal(m, b)
|
||||
}
|
||||
func (m *JoinBySnapshotStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_JoinBySnapshotStatus.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *JoinBySnapshotStatus) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_JoinBySnapshotStatus.Merge(m, src)
|
||||
}
|
||||
func (m *JoinBySnapshotStatus) XXX_Size() int {
|
||||
return xxx_messageInfo_JoinBySnapshotStatus.Size(m)
|
||||
}
|
||||
func (m *JoinBySnapshotStatus) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_JoinBySnapshotStatus.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_JoinBySnapshotStatus proto.InternalMessageInfo
|
||||
|
||||
func (m *JoinBySnapshotStatus) GetInProgress() bool {
|
||||
if m != nil {
|
||||
return m.InProgress
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *JoinBySnapshotStatus) GetBootstrappingSnapshotDir() string {
|
||||
if m != nil {
|
||||
return m.BootstrappingSnapshotDir
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*ChaincodeQueryResponse)(nil), "protos.ChaincodeQueryResponse")
|
||||
proto.RegisterType((*ChaincodeInfo)(nil), "protos.ChaincodeInfo")
|
||||
proto.RegisterType((*ChannelQueryResponse)(nil), "protos.ChannelQueryResponse")
|
||||
proto.RegisterType((*ChannelInfo)(nil), "protos.ChannelInfo")
|
||||
proto.RegisterType((*JoinBySnapshotStatus)(nil), "protos.JoinBySnapshotStatus")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("peer/query.proto", fileDescriptor_d45bcf7fe2423301) }
|
||||
|
||||
var fileDescriptor_d45bcf7fe2423301 = []byte{
|
||||
// 366 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0xcf, 0x6a, 0xe3, 0x30,
|
||||
0x10, 0x87, 0x71, 0xfe, 0x67, 0xb2, 0xbb, 0x2c, 0x6a, 0x5a, 0x44, 0xa1, 0x34, 0xf8, 0x94, 0x43,
|
||||
0x62, 0x43, 0x4b, 0x6f, 0x3d, 0x25, 0x85, 0x92, 0x5e, 0xda, 0x3a, 0xb7, 0x5e, 0x8c, 0x6c, 0x2b,
|
||||
0xb6, 0x20, 0x91, 0x54, 0x49, 0x0e, 0xe4, 0x69, 0xfa, 0xaa, 0x45, 0x96, 0x1d, 0x9c, 0x93, 0x67,
|
||||
0xbe, 0xf9, 0xc6, 0xe2, 0x27, 0x04, 0xff, 0x25, 0xa5, 0x2a, 0xfc, 0x2e, 0xa9, 0x3a, 0x05, 0x52,
|
||||
0x09, 0x23, 0xd0, 0xa0, 0xfa, 0x68, 0xff, 0x1d, 0x6e, 0xd6, 0x05, 0x61, 0x3c, 0x15, 0x19, 0xfd,
|
||||
0xb4, 0xf3, 0x88, 0x6a, 0x29, 0xb8, 0xa6, 0xe8, 0x09, 0x20, 0x6d, 0x26, 0x1a, 0x7b, 0xb3, 0xee,
|
||||
0x7c, 0xf2, 0x70, 0xed, 0xb6, 0x75, 0x70, 0xde, 0xd9, 0xf0, 0x9d, 0x88, 0x5a, 0xa2, 0xff, 0xe3,
|
||||
0xc1, 0xdf, 0x8b, 0x29, 0x42, 0xd0, 0xe3, 0xe4, 0x40, 0xb1, 0x37, 0xf3, 0xe6, 0xe3, 0xa8, 0xaa,
|
||||
0x11, 0x86, 0xe1, 0x91, 0x2a, 0xcd, 0x04, 0xc7, 0x9d, 0x0a, 0x37, 0xad, 0xb5, 0x25, 0x31, 0x05,
|
||||
0xee, 0x3a, 0xdb, 0xd6, 0x68, 0x0a, 0x7d, 0xc6, 0x65, 0x69, 0x70, 0xaf, 0x82, 0xae, 0xb1, 0x26,
|
||||
0xd5, 0x69, 0x8a, 0xfb, 0xce, 0xb4, 0xb5, 0x65, 0x47, 0xcb, 0x06, 0x8e, 0xd9, 0x1a, 0xfd, 0x83,
|
||||
0x0e, 0xcb, 0xf0, 0x70, 0xe6, 0xcd, 0xff, 0x44, 0x1d, 0x96, 0xf9, 0xaf, 0x30, 0x5d, 0x17, 0x84,
|
||||
0x73, 0xba, 0xbf, 0x0c, 0x1c, 0xc2, 0x28, 0x75, 0xbc, 0x89, 0x7b, 0xd5, 0x8a, 0x6b, 0x79, 0x15,
|
||||
0xf6, 0x2c, 0xf9, 0x0b, 0x98, 0xb4, 0x06, 0xe8, 0xae, 0xba, 0x30, 0xdb, 0xc6, 0x2c, 0xab, 0xd3,
|
||||
0x8e, 0x6b, 0xb2, 0xc9, 0xfc, 0x12, 0xa6, 0x6f, 0x82, 0xf1, 0xd5, 0x69, 0xcb, 0x89, 0xd4, 0x85,
|
||||
0x30, 0x5b, 0x43, 0x4c, 0xa9, 0xd1, 0x3d, 0x4c, 0x18, 0x8f, 0xa5, 0x12, 0xb9, 0xa2, 0x5a, 0x57,
|
||||
0x7b, 0xa3, 0x08, 0x18, 0xff, 0xa8, 0x09, 0x7a, 0x86, 0xdb, 0x44, 0x08, 0xa3, 0x8d, 0x22, 0x52,
|
||||
0x32, 0x9e, 0xc7, 0xba, 0xfe, 0x41, 0x9c, 0x31, 0x55, 0x5f, 0x1f, 0xbe, 0x30, 0x9a, 0x13, 0x5e,
|
||||
0x98, 0x5a, 0x45, 0xe0, 0x0b, 0x95, 0x07, 0xc5, 0x49, 0x52, 0xb5, 0xa7, 0x59, 0x4e, 0x55, 0xb0,
|
||||
0x23, 0x89, 0x62, 0x69, 0x93, 0xcd, 0x3e, 0x8d, 0xaf, 0x45, 0xce, 0x4c, 0x51, 0x26, 0x41, 0x2a,
|
||||
0x0e, 0x61, 0x4b, 0x0d, 0x9d, 0xba, 0x74, 0xea, 0x32, 0x17, 0xa1, 0xb5, 0x13, 0xf7, 0x78, 0x1e,
|
||||
0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x2c, 0x43, 0x94, 0x8d, 0x57, 0x02, 0x00, 0x00,
|
||||
}
|
chaincode/vendor/github.com/hyperledger/fabric-protos-go/peer/resources.pb.go (generated, vendored, 292 lines)
@ -0,0 +1,292 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: peer/resources.proto
|
||||
|
||||
package peer
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
common "github.com/hyperledger/fabric-protos-go/common"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// ChaincodeIdentifier identifies a piece of chaincode. For a peer to accept invocations of
|
||||
// this chaincode, the hash of the installed code must match, as must the version string
|
||||
// included with the install command.
|
||||
type ChaincodeIdentifier struct {
|
||||
Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"`
|
||||
Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ChaincodeIdentifier) Reset() { *m = ChaincodeIdentifier{} }
|
||||
func (m *ChaincodeIdentifier) String() string { return proto.CompactTextString(m) }
|
||||
func (*ChaincodeIdentifier) ProtoMessage() {}
|
||||
func (*ChaincodeIdentifier) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_4991d8496920b696, []int{0}
|
||||
}
|
||||
|
||||
func (m *ChaincodeIdentifier) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ChaincodeIdentifier.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ChaincodeIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ChaincodeIdentifier.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ChaincodeIdentifier) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ChaincodeIdentifier.Merge(m, src)
|
||||
}
|
||||
func (m *ChaincodeIdentifier) XXX_Size() int {
|
||||
return xxx_messageInfo_ChaincodeIdentifier.Size(m)
|
||||
}
|
||||
func (m *ChaincodeIdentifier) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ChaincodeIdentifier.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ChaincodeIdentifier proto.InternalMessageInfo
|
||||
|
||||
func (m *ChaincodeIdentifier) GetHash() []byte {
|
||||
if m != nil {
|
||||
return m.Hash
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ChaincodeIdentifier) GetVersion() string {
|
||||
if m != nil {
|
||||
return m.Version
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// ChaincodeValidation instructs the peer how transactions for this chaincode should be
// validated. The only validation mechanism which ships with Fabric today is the standard
// 'vscc' validation mechanism. This built-in validation method uses an endorsement policy
// which checks that a sufficient number of signatures have been included. The 'argument'
// field encodes any parameters required by the validation implementation.
|
||||
type ChaincodeValidation struct {
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
Argument []byte `protobuf:"bytes,2,opt,name=argument,proto3" json:"argument,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ChaincodeValidation) Reset() { *m = ChaincodeValidation{} }
|
||||
func (m *ChaincodeValidation) String() string { return proto.CompactTextString(m) }
|
||||
func (*ChaincodeValidation) ProtoMessage() {}
|
||||
func (*ChaincodeValidation) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_4991d8496920b696, []int{1}
|
||||
}
|
||||
|
||||
func (m *ChaincodeValidation) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ChaincodeValidation.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ChaincodeValidation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ChaincodeValidation.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ChaincodeValidation) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ChaincodeValidation.Merge(m, src)
|
||||
}
|
||||
func (m *ChaincodeValidation) XXX_Size() int {
|
||||
return xxx_messageInfo_ChaincodeValidation.Size(m)
|
||||
}
|
||||
func (m *ChaincodeValidation) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ChaincodeValidation.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ChaincodeValidation proto.InternalMessageInfo
|
||||
|
||||
func (m *ChaincodeValidation) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ChaincodeValidation) GetArgument() []byte {
|
||||
if m != nil {
|
||||
return m.Argument
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// VSCCArgs is passed (marshaled) as a parameter to the VSCC implementation via the
// argument field of the ChaincodeValidation message.
|
||||
type VSCCArgs struct {
|
||||
EndorsementPolicyRef string `protobuf:"bytes,1,opt,name=endorsement_policy_ref,json=endorsementPolicyRef,proto3" json:"endorsement_policy_ref,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *VSCCArgs) Reset() { *m = VSCCArgs{} }
|
||||
func (m *VSCCArgs) String() string { return proto.CompactTextString(m) }
|
||||
func (*VSCCArgs) ProtoMessage() {}
|
||||
func (*VSCCArgs) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_4991d8496920b696, []int{2}
|
||||
}
|
||||
|
||||
func (m *VSCCArgs) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_VSCCArgs.Unmarshal(m, b)
|
||||
}
|
||||
func (m *VSCCArgs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_VSCCArgs.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *VSCCArgs) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_VSCCArgs.Merge(m, src)
|
||||
}
|
||||
func (m *VSCCArgs) XXX_Size() int {
|
||||
return xxx_messageInfo_VSCCArgs.Size(m)
|
||||
}
|
||||
func (m *VSCCArgs) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_VSCCArgs.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_VSCCArgs proto.InternalMessageInfo
|
||||
|
||||
func (m *VSCCArgs) GetEndorsementPolicyRef() string {
|
||||
if m != nil {
|
||||
return m.EndorsementPolicyRef
|
||||
}
|
||||
return ""
|
||||
}
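// Illustrative sketch (not part of the generated file): the comments above say VSCCArgs is
// marshaled into ChaincodeValidation.Argument. A plausible way to build such a message with
// the proto package already imported in this file might look like this; the "vscc" name and
// the policy reference value are assumptions made only for the example.
func exampleChaincodeValidation() (*ChaincodeValidation, error) {
	// Marshal the VSCCArgs carrying the endorsement policy reference.
	args, err := proto.Marshal(&VSCCArgs{EndorsementPolicyRef: "/Channel/Application/Writers"})
	if err != nil {
		return nil, err
	}
	// The validation plugin name plus its marshaled arguments.
	return &ChaincodeValidation{Name: "vscc", Argument: args}, nil
}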
|
||||
|
||||
// ChaincodeEndorsement instructs the peer how transactions should be endorsed. The only
// endorsement mechanism which ships with Fabric today is the standard 'escc' mechanism.
// This code simply simulates the proposal to generate a read-write (RW) set, then signs
// the result using the peer's local signing identity.
|
||||
type ChaincodeEndorsement struct {
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ChaincodeEndorsement) Reset() { *m = ChaincodeEndorsement{} }
|
||||
func (m *ChaincodeEndorsement) String() string { return proto.CompactTextString(m) }
|
||||
func (*ChaincodeEndorsement) ProtoMessage() {}
|
||||
func (*ChaincodeEndorsement) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_4991d8496920b696, []int{3}
|
||||
}
|
||||
|
||||
func (m *ChaincodeEndorsement) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ChaincodeEndorsement.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ChaincodeEndorsement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ChaincodeEndorsement.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ChaincodeEndorsement) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ChaincodeEndorsement.Merge(m, src)
|
||||
}
|
||||
func (m *ChaincodeEndorsement) XXX_Size() int {
|
||||
return xxx_messageInfo_ChaincodeEndorsement.Size(m)
|
||||
}
|
||||
func (m *ChaincodeEndorsement) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ChaincodeEndorsement.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ChaincodeEndorsement proto.InternalMessageInfo
|
||||
|
||||
func (m *ChaincodeEndorsement) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// ConfigTree encapsulates channel and resources configuration of a channel.
|
||||
// Both configurations are represented as common.Config
|
||||
type ConfigTree struct {
|
||||
ChannelConfig *common.Config `protobuf:"bytes,1,opt,name=channel_config,json=channelConfig,proto3" json:"channel_config,omitempty"`
|
||||
ResourcesConfig *common.Config `protobuf:"bytes,2,opt,name=resources_config,json=resourcesConfig,proto3" json:"resources_config,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ConfigTree) Reset() { *m = ConfigTree{} }
|
||||
func (m *ConfigTree) String() string { return proto.CompactTextString(m) }
|
||||
func (*ConfigTree) ProtoMessage() {}
|
||||
func (*ConfigTree) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_4991d8496920b696, []int{4}
|
||||
}
|
||||
|
||||
func (m *ConfigTree) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ConfigTree.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ConfigTree) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ConfigTree.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ConfigTree) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ConfigTree.Merge(m, src)
|
||||
}
|
||||
func (m *ConfigTree) XXX_Size() int {
|
||||
return xxx_messageInfo_ConfigTree.Size(m)
|
||||
}
|
||||
func (m *ConfigTree) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ConfigTree.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ConfigTree proto.InternalMessageInfo
|
||||
|
||||
func (m *ConfigTree) GetChannelConfig() *common.Config {
|
||||
if m != nil {
|
||||
return m.ChannelConfig
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ConfigTree) GetResourcesConfig() *common.Config {
|
||||
if m != nil {
|
||||
return m.ResourcesConfig
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*ChaincodeIdentifier)(nil), "protos.ChaincodeIdentifier")
|
||||
proto.RegisterType((*ChaincodeValidation)(nil), "protos.ChaincodeValidation")
|
||||
proto.RegisterType((*VSCCArgs)(nil), "protos.VSCCArgs")
|
||||
proto.RegisterType((*ChaincodeEndorsement)(nil), "protos.ChaincodeEndorsement")
|
||||
proto.RegisterType((*ConfigTree)(nil), "protos.ConfigTree")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("peer/resources.proto", fileDescriptor_4991d8496920b696) }
|
||||
|
||||
var fileDescriptor_4991d8496920b696 = []byte{
|
||||
// 327 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0x3f, 0x4f, 0xc3, 0x30,
|
||||
0x10, 0xc5, 0xd5, 0x0a, 0x41, 0x6b, 0x4a, 0x41, 0xa6, 0xa0, 0xaa, 0x53, 0x95, 0xa9, 0x42, 0x34,
|
||||
0x91, 0xf8, 0x33, 0xb0, 0x01, 0x51, 0x07, 0x36, 0x64, 0x50, 0x07, 0x96, 0xca, 0x75, 0x2e, 0x8e,
|
||||
0xa5, 0xc4, 0x8e, 0xce, 0x29, 0xa2, 0x0b, 0x9f, 0x1d, 0xc5, 0x6e, 0x43, 0x86, 0x4e, 0xbe, 0xf3,
|
||||
0xfb, 0xdd, 0xd3, 0xe9, 0x1d, 0x19, 0x95, 0x00, 0x18, 0x21, 0x58, 0xb3, 0x41, 0x01, 0x36, 0x2c,
|
||||
0xd1, 0x54, 0x86, 0x1e, 0xbb, 0xc7, 0x4e, 0xae, 0x84, 0x29, 0x0a, 0xa3, 0x23, 0x61, 0x74, 0xaa,
|
||||
0x64, 0xf5, 0xe3, 0xe5, 0x20, 0x26, 0x97, 0x71, 0xc6, 0x95, 0x16, 0x26, 0x81, 0xb7, 0x04, 0x74,
|
||||
0xa5, 0x52, 0x05, 0x48, 0x29, 0x39, 0xca, 0xb8, 0xcd, 0xc6, 0x9d, 0x69, 0x67, 0x36, 0x60, 0xae,
|
||||
0xa6, 0x63, 0x72, 0xf2, 0x0d, 0x68, 0x95, 0xd1, 0xe3, 0xee, 0xb4, 0x33, 0xeb, 0xb3, 0x7d, 0x1b,
|
||||
0x2c, 0x5a, 0x26, 0x4b, 0x9e, 0xab, 0x84, 0x57, 0xca, 0xe8, 0xda, 0x44, 0xf3, 0x02, 0x9c, 0x49,
|
||||
0x9f, 0xb9, 0x9a, 0x4e, 0x48, 0x8f, 0xa3, 0xdc, 0x14, 0xa0, 0x2b, 0xe7, 0x32, 0x60, 0x4d, 0x1f,
|
||||
0x3c, 0x93, 0xde, 0xf2, 0x23, 0x8e, 0x5f, 0x50, 0x5a, 0xfa, 0x40, 0xae, 0x41, 0x27, 0x06, 0x2d,
|
||||
0xd4, 0xd2, 0xaa, 0x34, 0xb9, 0x12, 0xdb, 0x15, 0x42, 0xba, 0x73, 0x1b, 0xb5, 0xd4, 0x77, 0x27,
|
||||
0x32, 0x48, 0x83, 0x1b, 0x32, 0x6a, 0x16, 0x59, 0xfc, 0x03, 0x87, 0x36, 0x09, 0x7e, 0x09, 0x89,
|
||||
0x5d, 0x16, 0x9f, 0x08, 0x40, 0x1f, 0xc9, 0x50, 0x64, 0x5c, 0x6b, 0xc8, 0x57, 0x3e, 0x21, 0xc7,
|
||||
0x9e, 0xde, 0x0d, 0x43, 0x9f, 0x5b, 0xe8, 0x59, 0x76, 0xb6, 0xa3, 0x7c, 0x4b, 0x9f, 0xc8, 0x45,
|
||||
0x13, 0xf8, 0x7e, 0xb0, 0x7b, 0x70, 0xf0, 0xbc, 0xe1, 0xfc, 0xc7, 0x2b, 0x23, 0x81, 0x41, 0x19,
|
||||
0x66, 0xdb, 0x12, 0x30, 0x87, 0x44, 0x02, 0x86, 0x29, 0x5f, 0xa3, 0x12, 0xfe, 0x32, 0x36, 0xac,
|
||||
0xcf, 0xf9, 0x75, 0x2b, 0x55, 0x95, 0x6d, 0xd6, 0xb5, 0x59, 0xd4, 0x42, 0x23, 0x8f, 0xce, 0x3d,
|
||||
0x3a, 0x97, 0x26, 0xaa, 0xe9, 0xb5, 0x3f, 0xf6, 0xfd, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xae,
|
||||
0x32, 0x27, 0x0c, 0x0b, 0x02, 0x00, 0x00,
|
||||
}
|
chaincode/vendor/github.com/hyperledger/fabric-protos-go/peer/signed_cc_dep_spec.pb.go (generated, vendored; +112)
@ -0,0 +1,112 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: peer/signed_cc_dep_spec.proto
|
||||
|
||||
package peer
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// SignedChaincodeDeploymentSpec carries the CDS along with endorsements
|
||||
type SignedChaincodeDeploymentSpec struct {
|
||||
// This is the bytes of the ChaincodeDeploymentSpec
|
||||
ChaincodeDeploymentSpec []byte `protobuf:"bytes,1,opt,name=chaincode_deployment_spec,json=chaincodeDeploymentSpec,proto3" json:"chaincode_deployment_spec,omitempty"`
|
||||
// This is the instantiation policy, which is identical in structure
// to an endorsement policy. This policy is checked by the VSCC at commit
// time on the instantiation (all peers will get the same policy, as it
// will be part of the LSCC instantiation record and will be part of the
// hash as well)
|
||||
InstantiationPolicy []byte `protobuf:"bytes,2,opt,name=instantiation_policy,json=instantiationPolicy,proto3" json:"instantiation_policy,omitempty"`
|
||||
// The endorsements of the above deployment spec: each one is the owner's signature over
// chaincode_deployment_spec and Endorsement.endorser.
|
||||
OwnerEndorsements []*Endorsement `protobuf:"bytes,3,rep,name=owner_endorsements,json=ownerEndorsements,proto3" json:"owner_endorsements,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SignedChaincodeDeploymentSpec) Reset() { *m = SignedChaincodeDeploymentSpec{} }
|
||||
func (m *SignedChaincodeDeploymentSpec) String() string { return proto.CompactTextString(m) }
|
||||
func (*SignedChaincodeDeploymentSpec) ProtoMessage() {}
|
||||
func (*SignedChaincodeDeploymentSpec) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_e007a7c0644e7e6f, []int{0}
|
||||
}
|
||||
|
||||
func (m *SignedChaincodeDeploymentSpec) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_SignedChaincodeDeploymentSpec.Unmarshal(m, b)
|
||||
}
|
||||
func (m *SignedChaincodeDeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_SignedChaincodeDeploymentSpec.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *SignedChaincodeDeploymentSpec) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_SignedChaincodeDeploymentSpec.Merge(m, src)
|
||||
}
|
||||
func (m *SignedChaincodeDeploymentSpec) XXX_Size() int {
|
||||
return xxx_messageInfo_SignedChaincodeDeploymentSpec.Size(m)
|
||||
}
|
||||
func (m *SignedChaincodeDeploymentSpec) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_SignedChaincodeDeploymentSpec.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_SignedChaincodeDeploymentSpec proto.InternalMessageInfo
|
||||
|
||||
func (m *SignedChaincodeDeploymentSpec) GetChaincodeDeploymentSpec() []byte {
|
||||
if m != nil {
|
||||
return m.ChaincodeDeploymentSpec
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *SignedChaincodeDeploymentSpec) GetInstantiationPolicy() []byte {
|
||||
if m != nil {
|
||||
return m.InstantiationPolicy
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *SignedChaincodeDeploymentSpec) GetOwnerEndorsements() []*Endorsement {
|
||||
if m != nil {
|
||||
return m.OwnerEndorsements
|
||||
}
|
||||
return nil
|
||||
}
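// Illustrative sketch (not part of the generated file): assembling a
// SignedChaincodeDeploymentSpec from already-marshaled deployment-spec bytes, an
// instantiation policy, and the owners' endorsements. The byte slices are assumed to be
// produced elsewhere (e.g. by marshaling a ChaincodeDeploymentSpec and a policy envelope).
func exampleSignedCCDepSpec(cdsBytes, policyBytes []byte, owners []*Endorsement) ([]byte, error) {
	spec := &SignedChaincodeDeploymentSpec{
		ChaincodeDeploymentSpec: cdsBytes,
		InstantiationPolicy:     policyBytes,
		OwnerEndorsements:       owners,
	}
	// Marshal the wrapper so it can be submitted or stored as a single blob.
	return proto.Marshal(spec)
}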
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*SignedChaincodeDeploymentSpec)(nil), "protos.SignedChaincodeDeploymentSpec")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("peer/signed_cc_dep_spec.proto", fileDescriptor_e007a7c0644e7e6f) }
|
||||
|
||||
var fileDescriptor_e007a7c0644e7e6f = []byte{
|
||||
// 258 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x90, 0x41, 0x4b, 0xf3, 0x40,
|
||||
0x10, 0x86, 0xc9, 0x57, 0xf8, 0x0e, 0xab, 0x17, 0x53, 0xc1, 0x28, 0x16, 0x4a, 0x4f, 0x3d, 0xd8,
|
||||
0x04, 0xf5, 0xe6, 0xb1, 0xea, 0x5d, 0xd2, 0x9b, 0x97, 0x25, 0x99, 0x8c, 0xc9, 0x42, 0xba, 0x33,
|
||||
0xcc, 0xac, 0x48, 0xfe, 0xa6, 0xbf, 0x48, 0xb2, 0xb1, 0x58, 0x0f, 0x9e, 0x16, 0xf6, 0x79, 0xde,
|
||||
0x99, 0xe1, 0x35, 0x0b, 0x46, 0x94, 0x42, 0x5d, 0xeb, 0xb1, 0xb1, 0x00, 0xb6, 0x41, 0xb6, 0xca,
|
||||
0x08, 0x39, 0x0b, 0x05, 0x4a, 0xff, 0xc7, 0x47, 0xaf, 0xae, 0xa3, 0xc6, 0x42, 0x4c, 0x5a, 0xf5,
|
||||
0x56, 0x50, 0x99, 0xbc, 0xe2, 0x64, 0xad, 0x3e, 0x13, 0xb3, 0xd8, 0xc5, 0x11, 0x8f, 0x5d, 0xe5,
|
||||
0x3c, 0x50, 0x83, 0x4f, 0xc8, 0x3d, 0x0d, 0x7b, 0xf4, 0x61, 0xc7, 0x08, 0xe9, 0x83, 0xb9, 0x84,
|
||||
0x03, 0x1a, 0x77, 0x7c, 0xb3, 0xb8, 0x2a, 0x4b, 0x96, 0xc9, 0xfa, 0xb4, 0xbc, 0x80, 0x3f, 0xb2,
|
||||
0xb7, 0xe6, 0xdc, 0x79, 0x0d, 0x95, 0x0f, 0xae, 0x0a, 0x8e, 0xbc, 0x65, 0xea, 0x1d, 0x0c, 0xd9,
|
||||
0xbf, 0x18, 0x9b, 0xff, 0x62, 0x2f, 0x11, 0xa5, 0x5b, 0x93, 0xd2, 0x87, 0x47, 0xb1, 0xe8, 0x1b,
|
||||
0x12, 0xc5, 0x71, 0x96, 0x66, 0xb3, 0xe5, 0x6c, 0x7d, 0x72, 0x37, 0x9f, 0x8e, 0xd6, 0xfc, 0xf9,
|
||||
0x87, 0x95, 0x67, 0x51, 0x3f, 0xfa, 0xd1, 0x6d, 0x69, 0x56, 0x24, 0x6d, 0xde, 0x0d, 0x8c, 0xd2,
|
||||
0x63, 0xd3, 0xa2, 0xe4, 0x6f, 0x55, 0x2d, 0x0e, 0x0e, 0xf9, 0xb1, 0x92, 0xd7, 0x9b, 0xd6, 0x85,
|
||||
0xee, 0xbd, 0xce, 0x81, 0xf6, 0xc5, 0x91, 0x5a, 0x4c, 0xea, 0x66, 0x52, 0x37, 0x2d, 0x15, 0xa3,
|
||||
0x5d, 0x4f, 0x75, 0xde, 0x7f, 0x05, 0x00, 0x00, 0xff, 0xff, 0xa0, 0xc6, 0x22, 0xdb, 0x76, 0x01,
|
||||
0x00, 0x00,
|
||||
}
|
@ -0,0 +1,422 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: peer/snapshot.proto
|
||||
|
||||
package peer
|
||||
|
||||
import (
|
||||
context "context"
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
empty "github.com/golang/protobuf/ptypes/empty"
|
||||
common "github.com/hyperledger/fabric-protos-go/common"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// SnapshotRequest contains information for a generate/cancel snapshot request
|
||||
type SnapshotRequest struct {
|
||||
// The signature header that contains creator identity and nonce
|
||||
SignatureHeader *common.SignatureHeader `protobuf:"bytes,1,opt,name=signature_header,json=signatureHeader,proto3" json:"signature_header,omitempty"`
|
||||
// The channel ID
|
||||
ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"`
|
||||
// The block number to generate a snapshot
|
||||
BlockNumber uint64 `protobuf:"varint,3,opt,name=block_number,json=blockNumber,proto3" json:"block_number,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SnapshotRequest) Reset() { *m = SnapshotRequest{} }
|
||||
func (m *SnapshotRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*SnapshotRequest) ProtoMessage() {}
|
||||
func (*SnapshotRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_d05a247df97d1516, []int{0}
|
||||
}
|
||||
|
||||
func (m *SnapshotRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_SnapshotRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *SnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_SnapshotRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *SnapshotRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_SnapshotRequest.Merge(m, src)
|
||||
}
|
||||
func (m *SnapshotRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_SnapshotRequest.Size(m)
|
||||
}
|
||||
func (m *SnapshotRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_SnapshotRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_SnapshotRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *SnapshotRequest) GetSignatureHeader() *common.SignatureHeader {
|
||||
if m != nil {
|
||||
return m.SignatureHeader
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *SnapshotRequest) GetChannelId() string {
|
||||
if m != nil {
|
||||
return m.ChannelId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *SnapshotRequest) GetBlockNumber() uint64 {
|
||||
if m != nil {
|
||||
return m.BlockNumber
|
||||
}
|
||||
return 0
|
||||
}
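// Illustrative sketch (not part of the generated file): building a SignedSnapshotRequest from a
// SnapshotRequest. The sign callback, the channel ID "mychannel", the block number, and the
// creator/nonce bytes are assumptions for the example; a real client would derive them from its
// MSP identity and configuration.
func exampleSignedSnapshotRequest(creator, nonce []byte, sign func([]byte) ([]byte, error)) (*SignedSnapshotRequest, error) {
	req := &SnapshotRequest{
		SignatureHeader: &common.SignatureHeader{Creator: creator, Nonce: nonce},
		ChannelId:       "mychannel",
		BlockNumber:     100,
	}
	reqBytes, err := proto.Marshal(req)
	if err != nil {
		return nil, err
	}
	// The signature is computed over the marshaled request bytes, as described above.
	sig, err := sign(reqBytes)
	if err != nil {
		return nil, err
	}
	return &SignedSnapshotRequest{Request: reqBytes, Signature: sig}, nil
}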
|
||||
|
||||
// SnapshotQuery contains information for a query snapshot request
|
||||
type SnapshotQuery struct {
|
||||
// The signature header that contains creator identity and nonce
|
||||
SignatureHeader *common.SignatureHeader `protobuf:"bytes,1,opt,name=signature_header,json=signatureHeader,proto3" json:"signature_header,omitempty"`
|
||||
// The channel ID
|
||||
ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SnapshotQuery) Reset() { *m = SnapshotQuery{} }
|
||||
func (m *SnapshotQuery) String() string { return proto.CompactTextString(m) }
|
||||
func (*SnapshotQuery) ProtoMessage() {}
|
||||
func (*SnapshotQuery) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_d05a247df97d1516, []int{1}
|
||||
}
|
||||
|
||||
func (m *SnapshotQuery) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_SnapshotQuery.Unmarshal(m, b)
|
||||
}
|
||||
func (m *SnapshotQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_SnapshotQuery.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *SnapshotQuery) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_SnapshotQuery.Merge(m, src)
|
||||
}
|
||||
func (m *SnapshotQuery) XXX_Size() int {
|
||||
return xxx_messageInfo_SnapshotQuery.Size(m)
|
||||
}
|
||||
func (m *SnapshotQuery) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_SnapshotQuery.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_SnapshotQuery proto.InternalMessageInfo
|
||||
|
||||
func (m *SnapshotQuery) GetSignatureHeader() *common.SignatureHeader {
|
||||
if m != nil {
|
||||
return m.SignatureHeader
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *SnapshotQuery) GetChannelId() string {
|
||||
if m != nil {
|
||||
return m.ChannelId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// SignedSnapshotRequest contains marshalled request bytes and signature
|
||||
type SignedSnapshotRequest struct {
|
||||
// The bytes of SnapshotRequest or SnapshotQuery
|
||||
Request []byte `protobuf:"bytes,1,opt,name=request,proto3" json:"request,omitempty"`
|
||||
// Signature over the request bytes; this signature is to be verified against the client identity
|
||||
Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SignedSnapshotRequest) Reset() { *m = SignedSnapshotRequest{} }
|
||||
func (m *SignedSnapshotRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*SignedSnapshotRequest) ProtoMessage() {}
|
||||
func (*SignedSnapshotRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_d05a247df97d1516, []int{2}
|
||||
}
|
||||
|
||||
func (m *SignedSnapshotRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_SignedSnapshotRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *SignedSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_SignedSnapshotRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *SignedSnapshotRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_SignedSnapshotRequest.Merge(m, src)
|
||||
}
|
||||
func (m *SignedSnapshotRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_SignedSnapshotRequest.Size(m)
|
||||
}
|
||||
func (m *SignedSnapshotRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_SignedSnapshotRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_SignedSnapshotRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *SignedSnapshotRequest) GetRequest() []byte {
|
||||
if m != nil {
|
||||
return m.Request
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *SignedSnapshotRequest) GetSignature() []byte {
|
||||
if m != nil {
|
||||
return m.Signature
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// QueryPendingSnapshotsResponse specifies the response payload of a query pending snapshots request
|
||||
type QueryPendingSnapshotsResponse struct {
|
||||
BlockNumbers []uint64 `protobuf:"varint,1,rep,packed,name=block_numbers,json=blockNumbers,proto3" json:"block_numbers,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *QueryPendingSnapshotsResponse) Reset() { *m = QueryPendingSnapshotsResponse{} }
|
||||
func (m *QueryPendingSnapshotsResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*QueryPendingSnapshotsResponse) ProtoMessage() {}
|
||||
func (*QueryPendingSnapshotsResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_d05a247df97d1516, []int{3}
|
||||
}
|
||||
|
||||
func (m *QueryPendingSnapshotsResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_QueryPendingSnapshotsResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *QueryPendingSnapshotsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_QueryPendingSnapshotsResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *QueryPendingSnapshotsResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_QueryPendingSnapshotsResponse.Merge(m, src)
|
||||
}
|
||||
func (m *QueryPendingSnapshotsResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_QueryPendingSnapshotsResponse.Size(m)
|
||||
}
|
||||
func (m *QueryPendingSnapshotsResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_QueryPendingSnapshotsResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_QueryPendingSnapshotsResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *QueryPendingSnapshotsResponse) GetBlockNumbers() []uint64 {
|
||||
if m != nil {
|
||||
return m.BlockNumbers
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*SnapshotRequest)(nil), "protos.SnapshotRequest")
|
||||
proto.RegisterType((*SnapshotQuery)(nil), "protos.SnapshotQuery")
|
||||
proto.RegisterType((*SignedSnapshotRequest)(nil), "protos.SignedSnapshotRequest")
|
||||
proto.RegisterType((*QueryPendingSnapshotsResponse)(nil), "protos.QueryPendingSnapshotsResponse")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("peer/snapshot.proto", fileDescriptor_d05a247df97d1516) }
|
||||
|
||||
var fileDescriptor_d05a247df97d1516 = []byte{
|
||||
// 398 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x53, 0xcb, 0x8e, 0xd3, 0x30,
|
||||
0x14, 0x9d, 0x30, 0xa3, 0x61, 0x7a, 0x27, 0x55, 0x91, 0x2b, 0x20, 0x2a, 0x54, 0x0a, 0x41, 0x48,
|
||||
0x59, 0x50, 0x47, 0x2a, 0x5f, 0x40, 0x0b, 0x02, 0x36, 0x3c, 0xdc, 0x05, 0x12, 0x9b, 0x2a, 0x8f,
|
||||
0x5b, 0x27, 0x22, 0xb1, 0x83, 0x9d, 0x2c, 0xfa, 0x25, 0x7c, 0x24, 0x3f, 0x81, 0x62, 0xd7, 0x10,
|
||||
0x10, 0x02, 0x89, 0xc5, 0xac, 0x1c, 0x9f, 0x7b, 0x7c, 0x7c, 0x72, 0x7c, 0x2f, 0xcc, 0x5b, 0x44,
|
||||
0x95, 0x68, 0x91, 0xb6, 0xba, 0x94, 0x1d, 0x6d, 0x95, 0xec, 0x24, 0xb9, 0x34, 0x8b, 0x5e, 0x3c,
|
||||
0xe0, 0x52, 0xf2, 0x1a, 0x13, 0xb3, 0xcd, 0xfa, 0x43, 0x82, 0x4d, 0xdb, 0x1d, 0x2d, 0x69, 0x31,
|
||||
0xcf, 0x65, 0xd3, 0x48, 0x91, 0xd8, 0xc5, 0x82, 0xd1, 0x57, 0x0f, 0x66, 0xbb, 0x93, 0x18, 0xc3,
|
||||
0x2f, 0x3d, 0xea, 0x8e, 0x6c, 0xe0, 0x8e, 0xae, 0xb8, 0x48, 0xbb, 0x5e, 0xe1, 0xbe, 0xc4, 0xb4,
|
||||
0x40, 0x15, 0x78, 0xa1, 0x17, 0x5f, 0xaf, 0xef, 0xd3, 0xd3, 0xe1, 0x9d, 0xab, 0xbf, 0x36, 0x65,
|
||||
0x36, 0xd3, 0xbf, 0x02, 0x64, 0x09, 0x90, 0x97, 0xa9, 0x10, 0x58, 0xef, 0xab, 0x22, 0xb8, 0x15,
|
||||
0x7a, 0xf1, 0x84, 0x4d, 0x4e, 0xc8, 0x9b, 0x82, 0x3c, 0x02, 0x3f, 0xab, 0x65, 0xfe, 0x79, 0x2f,
|
||||
0xfa, 0x26, 0x43, 0x15, 0x9c, 0x87, 0x5e, 0x7c, 0xc1, 0xae, 0x0d, 0xf6, 0xd6, 0x40, 0x91, 0x82,
|
||||
0xa9, 0x33, 0xf6, 0xa1, 0x47, 0x75, 0xbc, 0x01, 0x5b, 0xd1, 0x3b, 0xb8, 0x3b, 0x48, 0x60, 0xf1,
|
||||
0x7b, 0x24, 0x01, 0xdc, 0x56, 0xf6, 0xd3, 0x5c, 0xe9, 0x33, 0xb7, 0x25, 0x0f, 0x61, 0xf2, 0xe3,
|
||||
0x12, 0x23, 0xe8, 0xb3, 0x9f, 0x40, 0xf4, 0x02, 0x96, 0xc6, 0xfc, 0x7b, 0x14, 0x45, 0x25, 0xb8,
|
||||
0x93, 0xd5, 0x0c, 0x75, 0x2b, 0x85, 0x46, 0xf2, 0x18, 0xa6, 0xe3, 0x20, 0x74, 0xe0, 0x85, 0xe7,
|
||||
0xf1, 0x05, 0xf3, 0x47, 0x49, 0xe8, 0xf5, 0x37, 0x0f, 0xae, 0xdc, 0x51, 0xb2, 0x85, 0xab, 0x57,
|
||||
0x28, 0x50, 0xa5, 0x1d, 0x92, 0xa5, 0x7d, 0x45, 0x4d, 0xff, 0xe8, 0x7a, 0x71, 0x8f, 0xda, 0x7e,
|
||||
0xa0, 0xae, 0x1f, 0xe8, 0xcb, 0xa1, 0x1f, 0xa2, 0x33, 0xf2, 0x1c, 0x2e, 0xb7, 0xa9, 0xc8, 0xb1,
|
||||
0xfe, 0x7f, 0x89, 0x8f, 0x30, 0x1d, 0xff, 0x9a, 0xfe, 0x97, 0xd2, 0x13, 0x57, 0xfe, 0x6b, 0x20,
|
||||
0xd1, 0xd9, 0x86, 0x41, 0x24, 0x15, 0xa7, 0xe5, 0xb1, 0x45, 0x55, 0x63, 0xc1, 0x51, 0xd1, 0x43,
|
||||
0x9a, 0xa9, 0x2a, 0x77, 0x02, 0xc3, 0x04, 0x7c, 0x7a, 0xca, 0xab, 0xae, 0xec, 0xb3, 0xe1, 0xe5,
|
||||
0x93, 0x11, 0x35, 0xb1, 0xd4, 0x95, 0xa5, 0xae, 0xb8, 0x4c, 0x06, 0x76, 0x66, 0x07, 0xe4, 0xd9,
|
||||
0xf7, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2f, 0x39, 0x38, 0xb5, 0x3e, 0x03, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
|
||||
// SnapshotClient is the client API for Snapshot service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type SnapshotClient interface {
|
||||
// Generate a snapshot request. SignedSnapshotRequest contains the marshalled bytes of a SnapshotRequest
|
||||
Generate(ctx context.Context, in *SignedSnapshotRequest, opts ...grpc.CallOption) (*empty.Empty, error)
|
||||
// Cancel a snapshot request. SignedSnapshotRequest contains the marshalled bytes of a SnapshotRequest
|
||||
Cancel(ctx context.Context, in *SignedSnapshotRequest, opts ...grpc.CallOption) (*empty.Empty, error)
|
||||
// Query pending snapshots. SignedSnapshotRequest contains the marshalled bytes of a SnapshotQuery
|
||||
QueryPendings(ctx context.Context, in *SignedSnapshotRequest, opts ...grpc.CallOption) (*QueryPendingSnapshotsResponse, error)
|
||||
}
|
||||
|
||||
type snapshotClient struct {
|
||||
cc *grpc.ClientConn
|
||||
}
|
||||
|
||||
func NewSnapshotClient(cc *grpc.ClientConn) SnapshotClient {
|
||||
return &snapshotClient{cc}
|
||||
}
|
||||
|
||||
func (c *snapshotClient) Generate(ctx context.Context, in *SignedSnapshotRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
|
||||
out := new(empty.Empty)
|
||||
err := c.cc.Invoke(ctx, "/protos.Snapshot/Generate", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *snapshotClient) Cancel(ctx context.Context, in *SignedSnapshotRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
|
||||
out := new(empty.Empty)
|
||||
err := c.cc.Invoke(ctx, "/protos.Snapshot/Cancel", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *snapshotClient) QueryPendings(ctx context.Context, in *SignedSnapshotRequest, opts ...grpc.CallOption) (*QueryPendingSnapshotsResponse, error) {
|
||||
out := new(QueryPendingSnapshotsResponse)
|
||||
err := c.cc.Invoke(ctx, "/protos.Snapshot/QueryPendings", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
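// Illustrative sketch (not part of the generated file): calling the Snapshot service through the
// generated client. The peer address is an assumption for the example, the request is assumed to
// be signed already, and plaintext transport is used here only for illustration.
func exampleGenerateSnapshot(ctx context.Context, signed *SignedSnapshotRequest) error {
	// Dial the peer's gRPC endpoint.
	conn, err := grpc.Dial("peer0.example.com:7051", grpc.WithInsecure())
	if err != nil {
		return err
	}
	defer conn.Close()

	client := NewSnapshotClient(conn)
	// Submit the snapshot generation request; the RPC returns an empty message on success.
	_, err = client.Generate(ctx, signed)
	return err
}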
|
||||
|
||||
// SnapshotServer is the server API for Snapshot service.
|
||||
type SnapshotServer interface {
|
||||
// Generate a snapshot request. SignedSnapshotRequest contains the marshalled bytes of a SnapshotRequest
|
||||
Generate(context.Context, *SignedSnapshotRequest) (*empty.Empty, error)
|
||||
// Cancel a snapshot request. SignedSnapshotRequest contains the marshalled bytes of a SnapshotRequest
|
||||
Cancel(context.Context, *SignedSnapshotRequest) (*empty.Empty, error)
|
||||
// Query pending snapshots. SignedSnapshotRequest contains the marshalled bytes of a SnapshotQuery
|
||||
QueryPendings(context.Context, *SignedSnapshotRequest) (*QueryPendingSnapshotsResponse, error)
|
||||
}
|
||||
|
||||
// UnimplementedSnapshotServer can be embedded to have forward compatible implementations.
|
||||
type UnimplementedSnapshotServer struct {
|
||||
}
|
||||
|
||||
func (*UnimplementedSnapshotServer) Generate(ctx context.Context, req *SignedSnapshotRequest) (*empty.Empty, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Generate not implemented")
|
||||
}
|
||||
func (*UnimplementedSnapshotServer) Cancel(ctx context.Context, req *SignedSnapshotRequest) (*empty.Empty, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Cancel not implemented")
|
||||
}
|
||||
func (*UnimplementedSnapshotServer) QueryPendings(ctx context.Context, req *SignedSnapshotRequest) (*QueryPendingSnapshotsResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method QueryPendings not implemented")
|
||||
}
|
||||
|
||||
func RegisterSnapshotServer(s *grpc.Server, srv SnapshotServer) {
|
||||
s.RegisterService(&_Snapshot_serviceDesc, srv)
|
||||
}
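// Illustrative sketch (not part of the generated file): a minimal server that overrides only
// QueryPendings and inherits "unimplemented" responses for the other methods by embedding
// UnimplementedSnapshotServer. The returned block numbers are placeholder values, and the
// registration helper assumes the caller owns the grpc.Server lifecycle.
type exampleSnapshotServer struct {
	UnimplementedSnapshotServer
}

func (s *exampleSnapshotServer) QueryPendings(ctx context.Context, req *SignedSnapshotRequest) (*QueryPendingSnapshotsResponse, error) {
	// A real implementation would verify req.Signature before answering.
	return &QueryPendingSnapshotsResponse{BlockNumbers: []uint64{100, 200}}, nil
}

// Register the example implementation against an existing grpc.Server provided by the caller.
func registerExampleSnapshotServer(s *grpc.Server) {
	RegisterSnapshotServer(s, &exampleSnapshotServer{})
}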
|
||||
|
||||
func _Snapshot_Generate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(SignedSnapshotRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(SnapshotServer).Generate(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/protos.Snapshot/Generate",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(SnapshotServer).Generate(ctx, req.(*SignedSnapshotRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Snapshot_Cancel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(SignedSnapshotRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(SnapshotServer).Cancel(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/protos.Snapshot/Cancel",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(SnapshotServer).Cancel(ctx, req.(*SignedSnapshotRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Snapshot_QueryPendings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(SignedSnapshotRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(SnapshotServer).QueryPendings(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/protos.Snapshot/QueryPendings",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(SnapshotServer).QueryPendings(ctx, req.(*SignedSnapshotRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
var _Snapshot_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "protos.Snapshot",
|
||||
HandlerType: (*SnapshotServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "Generate",
|
||||
Handler: _Snapshot_Generate_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "Cancel",
|
||||
Handler: _Snapshot_Cancel_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "QueryPendings",
|
||||
Handler: _Snapshot_QueryPendings_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "peer/snapshot.proto",
|
||||
}
|
chaincode/vendor/github.com/hyperledger/fabric-protos-go/peer/transaction.pb.go (generated, vendored; +494)
@ -0,0 +1,494 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: peer/transaction.proto
|
||||
|
||||
package peer
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
common "github.com/hyperledger/fabric-protos-go/common"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
type TxValidationCode int32
|
||||
|
||||
const (
|
||||
TxValidationCode_VALID TxValidationCode = 0
|
||||
TxValidationCode_NIL_ENVELOPE TxValidationCode = 1
|
||||
TxValidationCode_BAD_PAYLOAD TxValidationCode = 2
|
||||
TxValidationCode_BAD_COMMON_HEADER TxValidationCode = 3
|
||||
TxValidationCode_BAD_CREATOR_SIGNATURE TxValidationCode = 4
|
||||
TxValidationCode_INVALID_ENDORSER_TRANSACTION TxValidationCode = 5
|
||||
TxValidationCode_INVALID_CONFIG_TRANSACTION TxValidationCode = 6
|
||||
TxValidationCode_UNSUPPORTED_TX_PAYLOAD TxValidationCode = 7
|
||||
TxValidationCode_BAD_PROPOSAL_TXID TxValidationCode = 8
|
||||
TxValidationCode_DUPLICATE_TXID TxValidationCode = 9
|
||||
TxValidationCode_ENDORSEMENT_POLICY_FAILURE TxValidationCode = 10
|
||||
TxValidationCode_MVCC_READ_CONFLICT TxValidationCode = 11
|
||||
TxValidationCode_PHANTOM_READ_CONFLICT TxValidationCode = 12
|
||||
TxValidationCode_UNKNOWN_TX_TYPE TxValidationCode = 13
|
||||
TxValidationCode_TARGET_CHAIN_NOT_FOUND TxValidationCode = 14
|
||||
TxValidationCode_MARSHAL_TX_ERROR TxValidationCode = 15
|
||||
TxValidationCode_NIL_TXACTION TxValidationCode = 16
|
||||
TxValidationCode_EXPIRED_CHAINCODE TxValidationCode = 17
|
||||
TxValidationCode_CHAINCODE_VERSION_CONFLICT TxValidationCode = 18
|
||||
TxValidationCode_BAD_HEADER_EXTENSION TxValidationCode = 19
|
||||
TxValidationCode_BAD_CHANNEL_HEADER TxValidationCode = 20
|
||||
TxValidationCode_BAD_RESPONSE_PAYLOAD TxValidationCode = 21
|
||||
TxValidationCode_BAD_RWSET TxValidationCode = 22
|
||||
TxValidationCode_ILLEGAL_WRITESET TxValidationCode = 23
|
||||
TxValidationCode_INVALID_WRITESET TxValidationCode = 24
|
||||
TxValidationCode_INVALID_CHAINCODE TxValidationCode = 25
|
||||
TxValidationCode_NOT_VALIDATED TxValidationCode = 254
|
||||
TxValidationCode_INVALID_OTHER_REASON TxValidationCode = 255
|
||||
)
|
||||
|
||||
var TxValidationCode_name = map[int32]string{
|
||||
0: "VALID",
|
||||
1: "NIL_ENVELOPE",
|
||||
2: "BAD_PAYLOAD",
|
||||
3: "BAD_COMMON_HEADER",
|
||||
4: "BAD_CREATOR_SIGNATURE",
|
||||
5: "INVALID_ENDORSER_TRANSACTION",
|
||||
6: "INVALID_CONFIG_TRANSACTION",
|
||||
7: "UNSUPPORTED_TX_PAYLOAD",
|
||||
8: "BAD_PROPOSAL_TXID",
|
||||
9: "DUPLICATE_TXID",
|
||||
10: "ENDORSEMENT_POLICY_FAILURE",
|
||||
11: "MVCC_READ_CONFLICT",
|
||||
12: "PHANTOM_READ_CONFLICT",
|
||||
13: "UNKNOWN_TX_TYPE",
|
||||
14: "TARGET_CHAIN_NOT_FOUND",
|
||||
15: "MARSHAL_TX_ERROR",
|
||||
16: "NIL_TXACTION",
|
||||
17: "EXPIRED_CHAINCODE",
|
||||
18: "CHAINCODE_VERSION_CONFLICT",
|
||||
19: "BAD_HEADER_EXTENSION",
|
||||
20: "BAD_CHANNEL_HEADER",
|
||||
21: "BAD_RESPONSE_PAYLOAD",
|
||||
22: "BAD_RWSET",
|
||||
23: "ILLEGAL_WRITESET",
|
||||
24: "INVALID_WRITESET",
|
||||
25: "INVALID_CHAINCODE",
|
||||
254: "NOT_VALIDATED",
|
||||
255: "INVALID_OTHER_REASON",
|
||||
}
|
||||
|
||||
var TxValidationCode_value = map[string]int32{
|
||||
"VALID": 0,
|
||||
"NIL_ENVELOPE": 1,
|
||||
"BAD_PAYLOAD": 2,
|
||||
"BAD_COMMON_HEADER": 3,
|
||||
"BAD_CREATOR_SIGNATURE": 4,
|
||||
"INVALID_ENDORSER_TRANSACTION": 5,
|
||||
"INVALID_CONFIG_TRANSACTION": 6,
|
||||
"UNSUPPORTED_TX_PAYLOAD": 7,
|
||||
"BAD_PROPOSAL_TXID": 8,
|
||||
"DUPLICATE_TXID": 9,
|
||||
"ENDORSEMENT_POLICY_FAILURE": 10,
|
||||
"MVCC_READ_CONFLICT": 11,
|
||||
"PHANTOM_READ_CONFLICT": 12,
|
||||
"UNKNOWN_TX_TYPE": 13,
|
||||
"TARGET_CHAIN_NOT_FOUND": 14,
|
||||
"MARSHAL_TX_ERROR": 15,
|
||||
"NIL_TXACTION": 16,
|
||||
"EXPIRED_CHAINCODE": 17,
|
||||
"CHAINCODE_VERSION_CONFLICT": 18,
|
||||
"BAD_HEADER_EXTENSION": 19,
|
||||
"BAD_CHANNEL_HEADER": 20,
|
||||
"BAD_RESPONSE_PAYLOAD": 21,
|
||||
"BAD_RWSET": 22,
|
||||
"ILLEGAL_WRITESET": 23,
|
||||
"INVALID_WRITESET": 24,
|
||||
"INVALID_CHAINCODE": 25,
|
||||
"NOT_VALIDATED": 254,
|
||||
"INVALID_OTHER_REASON": 255,
|
||||
}
|
||||
|
||||
func (x TxValidationCode) String() string {
|
||||
return proto.EnumName(TxValidationCode_name, int32(x))
|
||||
}
|
||||
|
||||
func (TxValidationCode) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_25804bbfb0752368, []int{0}
|
||||
}
|
||||
|
||||
// Reserved entries in the key-level metadata map
|
||||
type MetaDataKeys int32
|
||||
|
||||
const (
|
||||
MetaDataKeys_VALIDATION_PARAMETER MetaDataKeys = 0
|
||||
MetaDataKeys_VALIDATION_PARAMETER_V2 MetaDataKeys = 1
|
||||
)
|
||||
|
||||
var MetaDataKeys_name = map[int32]string{
|
||||
0: "VALIDATION_PARAMETER",
|
||||
1: "VALIDATION_PARAMETER_V2",
|
||||
}
|
||||
|
||||
var MetaDataKeys_value = map[string]int32{
|
||||
"VALIDATION_PARAMETER": 0,
|
||||
"VALIDATION_PARAMETER_V2": 1,
|
||||
}
|
||||
|
||||
func (x MetaDataKeys) String() string {
|
||||
return proto.EnumName(MetaDataKeys_name, int32(x))
|
||||
}
|
||||
|
||||
func (MetaDataKeys) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_25804bbfb0752368, []int{1}
|
||||
}
|
||||
|
||||
// ProcessedTransaction wraps an Envelope that includes a transaction along with an indication
// of whether the transaction was validated or invalidated by the committing peer.
// The use case is that the GetTransactionByID API needs to retrieve the transaction Envelope
// from block storage, return it to a client, and indicate whether the transaction
// was validated or invalidated by the committing peer. So that the originally submitted
// transaction Envelope is not modified, the ProcessedTransaction wrapper is returned.
|
||||
type ProcessedTransaction struct {
|
||||
// An Envelope which includes a processed transaction
|
||||
TransactionEnvelope *common.Envelope `protobuf:"bytes,1,opt,name=transactionEnvelope,proto3" json:"transactionEnvelope,omitempty"`
|
||||
// An indication of whether the transaction was validated or invalidated by committing peer
|
||||
ValidationCode int32 `protobuf:"varint,2,opt,name=validationCode,proto3" json:"validationCode,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ProcessedTransaction) Reset() { *m = ProcessedTransaction{} }
|
||||
func (m *ProcessedTransaction) String() string { return proto.CompactTextString(m) }
|
||||
func (*ProcessedTransaction) ProtoMessage() {}
|
||||
func (*ProcessedTransaction) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_25804bbfb0752368, []int{0}
|
||||
}
|
||||
|
||||
func (m *ProcessedTransaction) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ProcessedTransaction.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ProcessedTransaction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ProcessedTransaction.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ProcessedTransaction) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ProcessedTransaction.Merge(m, src)
|
||||
}
|
||||
func (m *ProcessedTransaction) XXX_Size() int {
|
||||
return xxx_messageInfo_ProcessedTransaction.Size(m)
|
||||
}
|
||||
func (m *ProcessedTransaction) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ProcessedTransaction.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ProcessedTransaction proto.InternalMessageInfo
|
||||
|
||||
func (m *ProcessedTransaction) GetTransactionEnvelope() *common.Envelope {
|
||||
if m != nil {
|
||||
return m.TransactionEnvelope
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ProcessedTransaction) GetValidationCode() int32 {
|
||||
if m != nil {
|
||||
return m.ValidationCode
|
||||
}
|
||||
return 0
|
||||
}
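// Illustrative sketch (not part of the generated file): interpreting the validation code carried
// by a ProcessedTransaction. The int32 field maps onto the TxValidationCode enum defined earlier
// in this file; TxValidationCode_name turns the numeric code into its symbolic name (e.g. "VALID").
func exampleIsValid(pt *ProcessedTransaction) (bool, string) {
	code := TxValidationCode(pt.GetValidationCode())
	return code == TxValidationCode_VALID, TxValidationCode_name[int32(code)]
}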
|
||||
|
||||
// The transaction to be sent to the ordering service. A transaction contains
// one or more TransactionActions. Each TransactionAction binds a proposal to
// potentially multiple actions. The transaction is atomic, meaning that either
// all actions in the transaction will be committed or none will. Note that
// while a Transaction might include more than one Header, the Header.creator
// field must be the same in each.
// A single client is free to issue a number of independent Proposals, each with
// its own header (Header) and request payload (ChaincodeProposalPayload). Each
// proposal is independently endorsed, generating an action
// (ProposalResponsePayload) with one signature per Endorser. Any number of
// independent proposals (and their actions) might be included in a transaction
// to ensure that they are treated atomically.
|
||||
type Transaction struct {
|
||||
// The payload is an array of TransactionAction. An array is necessary to
|
||||
// accommodate multiple actions per transaction
|
||||
Actions []*TransactionAction `protobuf:"bytes,1,rep,name=actions,proto3" json:"actions,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Transaction) Reset() { *m = Transaction{} }
|
||||
func (m *Transaction) String() string { return proto.CompactTextString(m) }
|
||||
func (*Transaction) ProtoMessage() {}
|
||||
func (*Transaction) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_25804bbfb0752368, []int{1}
|
||||
}
|
||||
|
||||
func (m *Transaction) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Transaction.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Transaction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Transaction.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Transaction) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Transaction.Merge(m, src)
|
||||
}
|
||||
func (m *Transaction) XXX_Size() int {
|
||||
return xxx_messageInfo_Transaction.Size(m)
|
||||
}
|
||||
func (m *Transaction) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Transaction.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Transaction proto.InternalMessageInfo
|
||||
|
||||
func (m *Transaction) GetActions() []*TransactionAction {
|
||||
if m != nil {
|
||||
return m.Actions
|
||||
}
|
||||
return nil
|
||||
}
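// Illustrative sketch (not part of the generated file): walking a Transaction and decoding each
// action's payload as a ChaincodeActionPayload, as the TransactionAction.Payload comment below
// describes for the CHAINCODE header type. A real caller would first inspect the action header,
// since non-chaincode payloads would not unmarshal meaningfully.
func exampleChaincodePayloads(tx *Transaction) ([]*ChaincodeActionPayload, error) {
	var payloads []*ChaincodeActionPayload
	for _, action := range tx.GetActions() {
		ccPayload := &ChaincodeActionPayload{}
		if err := proto.Unmarshal(action.GetPayload(), ccPayload); err != nil {
			return nil, err
		}
		payloads = append(payloads, ccPayload)
	}
	return payloads, nil
}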
|
||||
|
||||
// TransactionAction binds a proposal to its action. The type field in the
|
||||
// header dictates the type of action to be applied to the ledger.
|
||||
type TransactionAction struct {
|
||||
// The header of the proposal action, which is the proposal header
|
||||
Header []byte `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
|
||||
// The payload of the action as defined by the type in the header. For
// chaincode, it's the bytes of ChaincodeActionPayload
|
||||
Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *TransactionAction) Reset() { *m = TransactionAction{} }
|
||||
func (m *TransactionAction) String() string { return proto.CompactTextString(m) }
|
||||
func (*TransactionAction) ProtoMessage() {}
|
||||
func (*TransactionAction) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_25804bbfb0752368, []int{2}
|
||||
}
|
||||
|
||||
func (m *TransactionAction) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_TransactionAction.Unmarshal(m, b)
|
||||
}
|
||||
func (m *TransactionAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_TransactionAction.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *TransactionAction) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_TransactionAction.Merge(m, src)
|
||||
}
|
||||
func (m *TransactionAction) XXX_Size() int {
|
||||
return xxx_messageInfo_TransactionAction.Size(m)
|
||||
}
|
||||
func (m *TransactionAction) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_TransactionAction.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_TransactionAction proto.InternalMessageInfo
|
||||
|
||||
func (m *TransactionAction) GetHeader() []byte {
|
||||
if m != nil {
|
||||
return m.Header
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *TransactionAction) GetPayload() []byte {
|
||||
if m != nil {
|
||||
return m.Payload
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ChaincodeActionPayload is the message to be used for the TransactionAction's
|
||||
// payload when the Header's type is set to CHAINCODE. It carries the
|
||||
// chaincodeProposalPayload and an endorsed action to apply to the ledger.
|
||||
type ChaincodeActionPayload struct {
|
||||
// This field contains the bytes of the ChaincodeProposalPayload message from
|
||||
// the original invocation (essentially the arguments) after the application
|
||||
// of the visibility function. The main visibility modes are "full" (the
|
||||
// entire ChaincodeProposalPayload message is included here), "hash" (only
|
||||
// the hash of the ChaincodeProposalPayload message is included) or
|
||||
// "nothing". This field will be used to check the consistency of
|
||||
// ProposalResponsePayload.proposalHash. For the CHAINCODE type,
|
||||
// ProposalResponsePayload.proposalHash is supposed to be H(ProposalHeader ||
|
||||
// f(ChaincodeProposalPayload)) where f is the visibility function.
|
||||
ChaincodeProposalPayload []byte `protobuf:"bytes,1,opt,name=chaincode_proposal_payload,json=chaincodeProposalPayload,proto3" json:"chaincode_proposal_payload,omitempty"`
|
||||
// The list of actions to apply to the ledger
|
||||
Action *ChaincodeEndorsedAction `protobuf:"bytes,2,opt,name=action,proto3" json:"action,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ChaincodeActionPayload) Reset() { *m = ChaincodeActionPayload{} }
|
||||
func (m *ChaincodeActionPayload) String() string { return proto.CompactTextString(m) }
|
||||
func (*ChaincodeActionPayload) ProtoMessage() {}
|
||||
func (*ChaincodeActionPayload) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_25804bbfb0752368, []int{3}
|
||||
}
|
||||
|
||||
func (m *ChaincodeActionPayload) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ChaincodeActionPayload.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ChaincodeActionPayload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ChaincodeActionPayload.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ChaincodeActionPayload) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ChaincodeActionPayload.Merge(m, src)
|
||||
}
|
||||
func (m *ChaincodeActionPayload) XXX_Size() int {
|
||||
return xxx_messageInfo_ChaincodeActionPayload.Size(m)
|
||||
}
|
||||
func (m *ChaincodeActionPayload) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ChaincodeActionPayload.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ChaincodeActionPayload proto.InternalMessageInfo
|
||||
|
||||
func (m *ChaincodeActionPayload) GetChaincodeProposalPayload() []byte {
|
||||
if m != nil {
|
||||
return m.ChaincodeProposalPayload
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ChaincodeActionPayload) GetAction() *ChaincodeEndorsedAction {
|
||||
if m != nil {
|
||||
return m.Action
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ChaincodeEndorsedAction carries information about the endorsement of a
|
||||
// specific proposal
|
||||
type ChaincodeEndorsedAction struct {
|
||||
// This is the bytes of the ProposalResponsePayload message signed by the
|
||||
// endorsers. Recall that for the CHAINCODE type, the
|
||||
// ProposalResponsePayload's extension field carries a ChaincodeAction
|
||||
ProposalResponsePayload []byte `protobuf:"bytes,1,opt,name=proposal_response_payload,json=proposalResponsePayload,proto3" json:"proposal_response_payload,omitempty"`
|
||||
// The endorsement of the proposal, basically the endorser's signature over
|
||||
// proposalResponsePayload
|
||||
Endorsements []*Endorsement `protobuf:"bytes,2,rep,name=endorsements,proto3" json:"endorsements,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ChaincodeEndorsedAction) Reset() { *m = ChaincodeEndorsedAction{} }
|
||||
func (m *ChaincodeEndorsedAction) String() string { return proto.CompactTextString(m) }
|
||||
func (*ChaincodeEndorsedAction) ProtoMessage() {}
|
||||
func (*ChaincodeEndorsedAction) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_25804bbfb0752368, []int{4}
|
||||
}
|
||||
|
||||
func (m *ChaincodeEndorsedAction) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ChaincodeEndorsedAction.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ChaincodeEndorsedAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ChaincodeEndorsedAction.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ChaincodeEndorsedAction) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ChaincodeEndorsedAction.Merge(m, src)
|
||||
}
|
||||
func (m *ChaincodeEndorsedAction) XXX_Size() int {
|
||||
return xxx_messageInfo_ChaincodeEndorsedAction.Size(m)
|
||||
}
|
||||
func (m *ChaincodeEndorsedAction) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ChaincodeEndorsedAction.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ChaincodeEndorsedAction proto.InternalMessageInfo
|
||||
|
||||
func (m *ChaincodeEndorsedAction) GetProposalResponsePayload() []byte {
|
||||
if m != nil {
|
||||
return m.ProposalResponsePayload
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ChaincodeEndorsedAction) GetEndorsements() []*Endorsement {
|
||||
if m != nil {
|
||||
return m.Endorsements
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("protos.TxValidationCode", TxValidationCode_name, TxValidationCode_value)
|
||||
proto.RegisterEnum("protos.MetaDataKeys", MetaDataKeys_name, MetaDataKeys_value)
|
||||
proto.RegisterType((*ProcessedTransaction)(nil), "protos.ProcessedTransaction")
|
||||
proto.RegisterType((*Transaction)(nil), "protos.Transaction")
|
||||
proto.RegisterType((*TransactionAction)(nil), "protos.TransactionAction")
|
||||
proto.RegisterType((*ChaincodeActionPayload)(nil), "protos.ChaincodeActionPayload")
|
||||
proto.RegisterType((*ChaincodeEndorsedAction)(nil), "protos.ChaincodeEndorsedAction")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("peer/transaction.proto", fileDescriptor_25804bbfb0752368) }
|
||||
|
||||
var fileDescriptor_25804bbfb0752368 = []byte{
|
||||
// 838 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x54, 0x5d, 0x6f, 0x22, 0x37,
|
||||
0x14, 0xdd, 0xc9, 0x36, 0x49, 0x63, 0x48, 0xe2, 0x18, 0x42, 0x80, 0xae, 0xda, 0x88, 0x87, 0x2a,
|
||||
0x5a, 0x75, 0x41, 0xca, 0x3e, 0x54, 0xaa, 0xfa, 0x62, 0x66, 0x6e, 0xc2, 0x68, 0x07, 0x7b, 0xe4,
|
||||
0x31, 0x84, 0xf4, 0xc5, 0x9a, 0x80, 0x97, 0xa0, 0x12, 0x06, 0xcd, 0xd0, 0x55, 0xf3, 0xda, 0x1f,
|
||||
0xd0, 0xfe, 0x90, 0xfe, 0xc7, 0xb6, 0xf2, 0x7c, 0x85, 0x64, 0xb7, 0x2f, 0x18, 0x9f, 0x73, 0xee,
|
||||
0xbd, 0xe7, 0xde, 0xab, 0x31, 0x6a, 0xac, 0xb5, 0x8e, 0x7b, 0x9b, 0x38, 0x5c, 0x25, 0xe1, 0x74,
|
||||
0xb3, 0x88, 0x56, 0xdd, 0x75, 0x1c, 0x6d, 0x22, 0xb2, 0x97, 0x1e, 0x49, 0xfb, 0x4d, 0xca, 0xaf,
|
||||
0xe3, 0x68, 0x1d, 0x25, 0xe1, 0x52, 0xc5, 0x3a, 0x59, 0x47, 0xab, 0x44, 0x67, 0xaa, 0x76, 0x6d,
|
||||
0x1a, 0x3d, 0x3c, 0x44, 0xab, 0x5e, 0x76, 0x64, 0x60, 0xe7, 0x0f, 0x0b, 0xd5, 0xfd, 0x38, 0x9a,
|
||||
0xea, 0x24, 0xd1, 0x33, 0xf9, 0x94, 0x99, 0xf4, 0x51, 0x6d, 0xab, 0x10, 0xac, 0x3e, 0xe9, 0x65,
|
||||
0xb4, 0xd6, 0x4d, 0xeb, 0xdc, 0xba, 0xa8, 0x5c, 0xe2, 0x6e, 0x9e, 0xa4, 0xc0, 0xc5, 0x97, 0xc4,
|
||||
0xe4, 0x7b, 0x74, 0xf4, 0x29, 0x5c, 0x2e, 0x66, 0xa1, 0x41, 0xed, 0x68, 0xa6, 0x9b, 0x3b, 0xe7,
|
||||
0xd6, 0xc5, 0xae, 0x78, 0x81, 0x76, 0xfa, 0xa8, 0xb2, 0x5d, 0xfa, 0x3d, 0xda, 0xcf, 0xfe, 0x25,
|
||||
0x4d, 0xeb, 0xfc, 0xf5, 0x45, 0xe5, 0xb2, 0x95, 0x99, 0x4d, 0xba, 0x5b, 0x2a, 0x9a, 0xfe, 0x8a,
|
||||
0x42, 0xd9, 0x01, 0x74, 0xf2, 0x19, 0x4b, 0x1a, 0x68, 0xef, 0x5e, 0x87, 0x33, 0x1d, 0xa7, 0xbe,
|
||||
0xab, 0x22, 0xbf, 0x91, 0x26, 0xda, 0x5f, 0x87, 0x8f, 0xcb, 0x28, 0x9c, 0xa5, 0x8e, 0xaa, 0xa2,
|
||||
0xb8, 0x76, 0xfe, 0xb2, 0x50, 0xc3, 0xbe, 0x0f, 0x17, 0xab, 0x69, 0x34, 0xd3, 0x59, 0x16, 0x3f,
|
||||
0xa3, 0xc8, 0xcf, 0xa8, 0x3d, 0x2d, 0x18, 0x55, 0x0e, 0xb9, 0xc8, 0x93, 0x15, 0x68, 0x96, 0x0a,
|
||||
0x3f, 0x17, 0x14, 0xd1, 0x3f, 0xa2, 0xbd, 0xcc, 0x5a, 0x5a, 0xb1, 0x72, 0xf9, 0x5d, 0xd1, 0x53,
|
||||
0x59, 0x0d, 0x56, 0xb3, 0x28, 0x4e, 0xf4, 0x2c, 0xef, 0x2c, 0x97, 0x77, 0xfe, 0xb4, 0xd0, 0xd9,
|
||||
0xff, 0x68, 0xc8, 0x4f, 0xa8, 0xf5, 0xd9, 0xb6, 0x5f, 0x38, 0x3a, 0x2b, 0x04, 0x22, 0xe7, 0x9f,
|
||||
0x0c, 0x55, 0x75, 0x96, 0xed, 0x41, 0xaf, 0x36, 0x49, 0x73, 0x27, 0x1d, 0x75, 0xad, 0xb0, 0x05,
|
||||
0x4f, 0x9c, 0x78, 0x26, 0x7c, 0xfb, 0xf7, 0x2e, 0xc2, 0xf2, 0xf7, 0xf1, 0xb3, 0x15, 0x92, 0x03,
|
||||
0xb4, 0x3b, 0xa6, 0x9e, 0xeb, 0xe0, 0x57, 0x04, 0xa3, 0x2a, 0x73, 0x3d, 0x05, 0x6c, 0x0c, 0x1e,
|
||||
0xf7, 0x01, 0x5b, 0xe4, 0x18, 0x55, 0xfa, 0xd4, 0x51, 0x3e, 0xbd, 0xf5, 0x38, 0x75, 0xf0, 0x0e,
|
||||
0x39, 0x45, 0x27, 0x06, 0xb0, 0xf9, 0x70, 0xc8, 0x99, 0x1a, 0x00, 0x75, 0x40, 0xe0, 0xd7, 0xa4,
|
||||
0x85, 0x4e, 0x53, 0x58, 0x00, 0x95, 0x5c, 0xa8, 0xc0, 0xbd, 0x66, 0x54, 0x8e, 0x04, 0xe0, 0xaf,
|
||||
0xc8, 0x39, 0x7a, 0xe3, 0xb2, 0xb4, 0x82, 0x02, 0xe6, 0x70, 0x11, 0x80, 0x50, 0x52, 0x50, 0x16,
|
||||
0x50, 0x5b, 0xba, 0x9c, 0xe1, 0x5d, 0xf2, 0x2d, 0x6a, 0x17, 0x0a, 0x9b, 0xb3, 0x2b, 0xf7, 0xfa,
|
||||
0x19, 0xbf, 0x47, 0xda, 0xa8, 0x31, 0x62, 0xc1, 0xc8, 0xf7, 0xb9, 0x90, 0xe0, 0x28, 0x39, 0x29,
|
||||
0xfd, 0xec, 0x17, 0x7e, 0x7c, 0xc1, 0x7d, 0x1e, 0x50, 0x4f, 0xc9, 0x89, 0xeb, 0xe0, 0xaf, 0x09,
|
||||
0x41, 0x47, 0xce, 0xc8, 0xf7, 0x5c, 0x9b, 0x4a, 0xc8, 0xb0, 0x03, 0x53, 0x26, 0x37, 0x30, 0x04,
|
||||
0x26, 0x95, 0xcf, 0x3d, 0xd7, 0xbe, 0x55, 0x57, 0xd4, 0xf5, 0x8c, 0x51, 0x44, 0x1a, 0x88, 0x0c,
|
||||
0xc7, 0xb6, 0xad, 0x04, 0xd0, 0xcc, 0x88, 0xe7, 0xda, 0x12, 0x57, 0x4c, 0x6f, 0xfe, 0x80, 0x32,
|
||||
0xc9, 0x87, 0x2f, 0xa8, 0x2a, 0xa9, 0xa1, 0xe3, 0x11, 0xfb, 0xc0, 0xf8, 0x0d, 0x33, 0xae, 0xe4,
|
||||
0xad, 0x0f, 0xf8, 0xd0, 0xd8, 0x95, 0x54, 0x5c, 0x83, 0x54, 0xf6, 0x80, 0xba, 0x4c, 0x31, 0x2e,
|
||||
0xd5, 0x15, 0x1f, 0x31, 0x07, 0x1f, 0x91, 0x3a, 0xc2, 0x43, 0x2a, 0x82, 0x41, 0xea, 0x54, 0x81,
|
||||
0x10, 0x5c, 0xe0, 0xe3, 0x62, 0xee, 0x72, 0x92, 0xb7, 0x8c, 0x4d, 0x5b, 0x30, 0xf1, 0x5d, 0x01,
|
||||
0x4e, 0x96, 0xc4, 0xe6, 0x0e, 0xe0, 0x13, 0xd3, 0x42, 0x79, 0x55, 0x63, 0x10, 0x81, 0xcb, 0xd9,
|
||||
0x93, 0x1f, 0x42, 0x9a, 0xa8, 0x6e, 0xa6, 0x91, 0xad, 0x45, 0xc1, 0x44, 0x02, 0x33, 0x12, 0x5c,
|
||||
0x33, 0xcd, 0xa5, 0x0b, 0x1a, 0x50, 0xc6, 0xc0, 0x2b, 0x16, 0x57, 0x2f, 0x22, 0x04, 0x04, 0x3e,
|
||||
0x67, 0x01, 0x94, 0x93, 0x3d, 0x25, 0x87, 0xe8, 0x20, 0x65, 0x6e, 0x02, 0x90, 0xb8, 0x61, 0x9c,
|
||||
0xbb, 0x9e, 0x07, 0xd7, 0xd4, 0x53, 0x37, 0xc2, 0x95, 0x60, 0xd0, 0xb3, 0x14, 0xcd, 0x57, 0x57,
|
||||
0xa2, 0x4d, 0xe3, 0xbe, 0x5c, 0x68, 0xe9, 0xbe, 0x45, 0x08, 0x3a, 0x34, 0xb3, 0x48, 0x09, 0x2a,
|
||||
0xc1, 0xc1, 0xff, 0x58, 0xa4, 0x85, 0xea, 0x85, 0x94, 0xcb, 0x01, 0x08, 0x33, 0xe2, 0x80, 0x33,
|
||||
0xfc, 0xaf, 0xf5, 0x16, 0x50, 0x75, 0xa8, 0x37, 0xa1, 0x13, 0x6e, 0xc2, 0x0f, 0xfa, 0x31, 0x31,
|
||||
0x56, 0xf3, 0x50, 0xd3, 0xb5, 0x4f, 0x05, 0x1d, 0x82, 0x04, 0x81, 0x5f, 0x91, 0x6f, 0xd0, 0xd9,
|
||||
0x97, 0x18, 0x35, 0xbe, 0xc4, 0x56, 0xff, 0x23, 0xea, 0x44, 0xf1, 0xbc, 0x7b, 0xff, 0xb8, 0xd6,
|
||||
0xf1, 0x52, 0xcf, 0xe6, 0x3a, 0xee, 0x7e, 0x0c, 0xef, 0xe2, 0xc5, 0xb4, 0xf8, 0x5e, 0xcc, 0xd3,
|
||||
0xdb, 0x27, 0x5b, 0x4f, 0x90, 0x1f, 0x4e, 0x7f, 0x0d, 0xe7, 0xfa, 0x97, 0x1f, 0xe6, 0x8b, 0xcd,
|
||||
0xfd, 0x6f, 0x77, 0xe6, 0xc5, 0xec, 0x6d, 0x85, 0xf7, 0xb2, 0xf0, 0x77, 0x59, 0xf8, 0xbb, 0x79,
|
||||
0xd4, 0x33, 0x19, 0xee, 0xb2, 0xa7, 0xfc, 0xfd, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa8, 0xb6,
|
||||
0xfa, 0xa9, 0xeb, 0x05, 0x00, 0x00,
|
||||
}
|
@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.
@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.
@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)

"This implementation" means the copyrightable works distributed by
Google as part of the Go project.

Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
@ -0,0 +1,50 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package httpguts provides functions implementing various details
// of the HTTP specification.
//
// This package is shared by the standard library (which vendors it)
// and x/net/http2. It comes with no API stability promise.
package httpguts

import (
	"net/textproto"
	"strings"
)

// ValidTrailerHeader reports whether name is a valid header field name to appear
// in trailers.
// See RFC 7230, Section 4.1.2
func ValidTrailerHeader(name string) bool {
	name = textproto.CanonicalMIMEHeaderKey(name)
	if strings.HasPrefix(name, "If-") || badTrailer[name] {
		return false
	}
	return true
}

var badTrailer = map[string]bool{
	"Authorization":       true,
	"Cache-Control":       true,
	"Connection":          true,
	"Content-Encoding":    true,
	"Content-Length":      true,
	"Content-Range":       true,
	"Content-Type":        true,
	"Expect":              true,
	"Host":                true,
	"Keep-Alive":          true,
	"Max-Forwards":        true,
	"Pragma":              true,
	"Proxy-Authenticate":  true,
	"Proxy-Authorization": true,
	"Proxy-Connection":    true,
	"Range":               true,
	"Realm":               true,
	"Te":                  true,
	"Trailer":             true,
	"Transfer-Encoding":   true,
	"Www-Authenticate":    true,
}
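
// Illustrative sketch (an assumption of this edit, not upstream API): a quick
// look at how ValidTrailerHeader behaves. "X-Checksum" is a hypothetical
// application-defined trailer name; Content-Length is listed in badTrailer.
func exampleTrailerChecks() (customOK, contentLengthOK bool) {
	customOK = ValidTrailerHeader("X-Checksum")            // true: not hop-by-hop, not "If-" prefixed
	contentLengthOK = ValidTrailerHeader("Content-Length") // false: present in badTrailer
	return customOK, contentLengthOK
}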
|
@ -0,0 +1,346 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package httpguts
|
||||
|
||||
import (
|
||||
"net"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/net/idna"
|
||||
)
|
||||
|
||||
var isTokenTable = [127]bool{
|
||||
'!': true,
|
||||
'#': true,
|
||||
'$': true,
|
||||
'%': true,
|
||||
'&': true,
|
||||
'\'': true,
|
||||
'*': true,
|
||||
'+': true,
|
||||
'-': true,
|
||||
'.': true,
|
||||
'0': true,
|
||||
'1': true,
|
||||
'2': true,
|
||||
'3': true,
|
||||
'4': true,
|
||||
'5': true,
|
||||
'6': true,
|
||||
'7': true,
|
||||
'8': true,
|
||||
'9': true,
|
||||
'A': true,
|
||||
'B': true,
|
||||
'C': true,
|
||||
'D': true,
|
||||
'E': true,
|
||||
'F': true,
|
||||
'G': true,
|
||||
'H': true,
|
||||
'I': true,
|
||||
'J': true,
|
||||
'K': true,
|
||||
'L': true,
|
||||
'M': true,
|
||||
'N': true,
|
||||
'O': true,
|
||||
'P': true,
|
||||
'Q': true,
|
||||
'R': true,
|
||||
'S': true,
|
||||
'T': true,
|
||||
'U': true,
|
||||
'W': true,
|
||||
'V': true,
|
||||
'X': true,
|
||||
'Y': true,
|
||||
'Z': true,
|
||||
'^': true,
|
||||
'_': true,
|
||||
'`': true,
|
||||
'a': true,
|
||||
'b': true,
|
||||
'c': true,
|
||||
'd': true,
|
||||
'e': true,
|
||||
'f': true,
|
||||
'g': true,
|
||||
'h': true,
|
||||
'i': true,
|
||||
'j': true,
|
||||
'k': true,
|
||||
'l': true,
|
||||
'm': true,
|
||||
'n': true,
|
||||
'o': true,
|
||||
'p': true,
|
||||
'q': true,
|
||||
'r': true,
|
||||
's': true,
|
||||
't': true,
|
||||
'u': true,
|
||||
'v': true,
|
||||
'w': true,
|
||||
'x': true,
|
||||
'y': true,
|
||||
'z': true,
|
||||
'|': true,
|
||||
'~': true,
|
||||
}

func IsTokenRune(r rune) bool {
	i := int(r)
	return i < len(isTokenTable) && isTokenTable[i]
}

func isNotToken(r rune) bool {
	return !IsTokenRune(r)
}

// HeaderValuesContainsToken reports whether any string in values
// contains the provided token, ASCII case-insensitively.
func HeaderValuesContainsToken(values []string, token string) bool {
	for _, v := range values {
		if headerValueContainsToken(v, token) {
			return true
		}
	}
	return false
}
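
// Illustrative sketch (assumption, not upstream API): HeaderValuesContainsToken
// splits each value on commas and compares tokens ASCII case-insensitively, so
// a Connection value of "keep-alive, Upgrade" contains the token "upgrade".
func exampleConnectionHasUpgrade() bool {
	return HeaderValuesContainsToken([]string{"keep-alive, Upgrade"}, "upgrade") // true
}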

// isOWS reports whether b is an optional whitespace byte, as defined
// by RFC 7230 section 3.2.3.
func isOWS(b byte) bool { return b == ' ' || b == '\t' }

// trimOWS returns x with all optional whitespace removed from the
// beginning and end.
func trimOWS(x string) string {
	// TODO: consider using strings.Trim(x, " \t") instead,
	// if and when it's fast enough. See issue 10292.
	// But this ASCII-only code will probably always beat UTF-8
	// aware code.
	for len(x) > 0 && isOWS(x[0]) {
		x = x[1:]
	}
	for len(x) > 0 && isOWS(x[len(x)-1]) {
		x = x[:len(x)-1]
	}
	return x
}
|
||||
|
||||
// headerValueContainsToken reports whether v (assumed to be a
|
||||
// 0#element, in the ABNF extension described in RFC 7230 section 7)
|
||||
// contains token amongst its comma-separated tokens, ASCII
|
||||
// case-insensitively.
|
||||
func headerValueContainsToken(v string, token string) bool {
|
||||
v = trimOWS(v)
|
||||
if comma := strings.IndexByte(v, ','); comma != -1 {
|
||||
return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token)
|
||||
}
|
||||
return tokenEqual(v, token)
|
||||
}
|
||||
|
||||
// lowerASCII returns the ASCII lowercase version of b.
|
||||
func lowerASCII(b byte) byte {
|
||||
if 'A' <= b && b <= 'Z' {
|
||||
return b + ('a' - 'A')
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively.
|
||||
func tokenEqual(t1, t2 string) bool {
|
||||
if len(t1) != len(t2) {
|
||||
return false
|
||||
}
|
||||
for i, b := range t1 {
|
||||
if b >= utf8.RuneSelf {
|
||||
// No UTF-8 or non-ASCII allowed in tokens.
|
||||
return false
|
||||
}
|
||||
if lowerASCII(byte(b)) != lowerASCII(t2[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// isLWS reports whether b is linear white space, according
|
||||
// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
|
||||
// LWS = [CRLF] 1*( SP | HT )
|
||||
func isLWS(b byte) bool { return b == ' ' || b == '\t' }
|
||||
|
||||
// isCTL reports whether b is a control byte, according
|
||||
// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
|
||||
// CTL = <any US-ASCII control character
|
||||
// (octets 0 - 31) and DEL (127)>
|
||||
func isCTL(b byte) bool {
|
||||
const del = 0x7f // a CTL
|
||||
return b < ' ' || b == del
|
||||
}
|
||||
|
||||
// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name.
|
||||
// HTTP/2 imposes the additional restriction that uppercase ASCII
|
||||
// letters are not allowed.
|
||||
//
|
||||
// RFC 7230 says:
|
||||
// header-field = field-name ":" OWS field-value OWS
|
||||
// field-name = token
|
||||
// token = 1*tchar
|
||||
// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
|
||||
// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
|
||||
func ValidHeaderFieldName(v string) bool {
|
||||
if len(v) == 0 {
|
||||
return false
|
||||
}
|
||||
for _, r := range v {
|
||||
if !IsTokenRune(r) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// ValidHostHeader reports whether h is a valid host header.
|
||||
func ValidHostHeader(h string) bool {
|
||||
// The latest spec is actually this:
|
||||
//
|
||||
// http://tools.ietf.org/html/rfc7230#section-5.4
|
||||
// Host = uri-host [ ":" port ]
|
||||
//
|
||||
// Where uri-host is:
|
||||
// http://tools.ietf.org/html/rfc3986#section-3.2.2
|
||||
//
|
||||
// But we're going to be much more lenient for now and just
|
||||
// search for any byte that's not a valid byte in any of those
|
||||
// expressions.
|
||||
for i := 0; i < len(h); i++ {
|
||||
if !validHostByte[h[i]] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// See the validHostHeader comment.
|
||||
var validHostByte = [256]bool{
|
||||
'0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true,
|
||||
'8': true, '9': true,
|
||||
|
||||
'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true,
|
||||
'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true,
|
||||
'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true,
|
||||
'y': true, 'z': true,
|
||||
|
||||
'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true,
|
||||
'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true,
|
||||
'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true,
|
||||
'Y': true, 'Z': true,
|
||||
|
||||
'!': true, // sub-delims
|
||||
'$': true, // sub-delims
|
||||
'%': true, // pct-encoded (and used in IPv6 zones)
|
||||
'&': true, // sub-delims
|
||||
'(': true, // sub-delims
|
||||
')': true, // sub-delims
|
||||
'*': true, // sub-delims
|
||||
'+': true, // sub-delims
|
||||
',': true, // sub-delims
|
||||
'-': true, // unreserved
|
||||
'.': true, // unreserved
|
||||
':': true, // IPv6address + Host expression's optional port
|
||||
';': true, // sub-delims
|
||||
'=': true, // sub-delims
|
||||
'[': true,
|
||||
'\'': true, // sub-delims
|
||||
']': true,
|
||||
'_': true, // unreserved
|
||||
'~': true, // unreserved
|
||||
}
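
// Illustrative sketch (assumption, not upstream API): ValidHostHeader accepts
// "host" and "host:port" strings built from the bytes allowed in validHostByte
// and rejects anything else, such as a value containing a space.
func exampleHostChecks() (hostPortOK, spacedOK bool) {
	hostPortOK = ValidHostHeader("example.com:8080") // true: letters, digits, '.', ':' are allowed
	spacedOK = ValidHostHeader("example com")        // false: ' ' is not in validHostByte
	return hostPortOK, spacedOK
}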
|
||||
|
||||
// ValidHeaderFieldValue reports whether v is a valid "field-value" according to
|
||||
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 :
|
||||
//
|
||||
// message-header = field-name ":" [ field-value ]
|
||||
// field-value = *( field-content | LWS )
|
||||
// field-content = <the OCTETs making up the field-value
|
||||
// and consisting of either *TEXT or combinations
|
||||
// of token, separators, and quoted-string>
|
||||
//
|
||||
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 :
|
||||
//
|
||||
// TEXT = <any OCTET except CTLs,
|
||||
// but including LWS>
|
||||
// LWS = [CRLF] 1*( SP | HT )
|
||||
// CTL = <any US-ASCII control character
|
||||
// (octets 0 - 31) and DEL (127)>
|
||||
//
|
||||
// RFC 7230 says:
|
||||
// field-value = *( field-content / obs-fold )
|
||||
// obj-fold = N/A to http2, and deprecated
|
||||
// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
|
||||
// field-vchar = VCHAR / obs-text
|
||||
// obs-text = %x80-FF
|
||||
// VCHAR = "any visible [USASCII] character"
|
||||
//
|
||||
// http2 further says: "Similarly, HTTP/2 allows header field values
|
||||
// that are not valid. While most of the values that can be encoded
|
||||
// will not alter header field parsing, carriage return (CR, ASCII
|
||||
// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII
|
||||
// 0x0) might be exploited by an attacker if they are translated
|
||||
// verbatim. Any request or response that contains a character not
|
||||
// permitted in a header field value MUST be treated as malformed
|
||||
// (Section 8.1.2.6). Valid characters are defined by the
|
||||
// field-content ABNF rule in Section 3.2 of [RFC7230]."
|
||||
//
|
||||
// This function does not (yet?) properly handle the rejection of
|
||||
// strings that begin or end with SP or HTAB.
|
||||
func ValidHeaderFieldValue(v string) bool {
|
||||
for i := 0; i < len(v); i++ {
|
||||
b := v[i]
|
||||
if isCTL(b) && !isLWS(b) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
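
// Illustrative sketch (assumption, not upstream API): ValidHeaderFieldValue
// rejects CTL bytes other than SP and HTAB, so a value carrying CR/LF (a
// header-injection attempt) fails, while ordinary text with a tab passes.
func exampleFieldValueChecks() (plainOK, injectedOK bool) {
	plainOK = ValidHeaderFieldValue("text/html;\tq=0.9")           // true: HTAB counts as LWS
	injectedOK = ValidHeaderFieldValue("ok\r\nSet-Cookie: evil=1") // false: CR and LF are CTLs
	return plainOK, injectedOK
}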
|
||||
|
||||
func isASCII(s string) bool {
|
||||
for i := 0; i < len(s); i++ {
|
||||
if s[i] >= utf8.RuneSelf {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}

// PunycodeHostPort returns the IDNA Punycode version
// of the provided "host" or "host:port" string.
func PunycodeHostPort(v string) (string, error) {
	if isASCII(v) {
		return v, nil
	}

	host, port, err := net.SplitHostPort(v)
	if err != nil {
		// The input 'v' argument was just a "host" argument,
		// without a port. This error should not be returned
		// to the caller.
		host = v
		port = ""
	}
	host, err = idna.ToASCII(host)
	if err != nil {
		// Non-UTF-8? Not representable in Punycode, in any
		// case.
		return "", err
	}
	if port == "" {
		return host, nil
	}
	return net.JoinHostPort(host, port), nil
}
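
// Illustrative sketch (assumption, not upstream API): PunycodeHostPort returns
// ASCII input unchanged and converts a non-ASCII host while preserving any
// ":port" suffix; "bücher.example" is expected to become "xn--bcher-kva.example".
func examplePunycode() (ascii, converted string, err error) {
	ascii, _ = PunycodeHostPort("example.com:443") // ASCII fast path: returned as-is
	converted, err = PunycodeHostPort("bücher.example:443")
	return ascii, converted, err
}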
|
@ -0,0 +1,2 @@
*~
h2i/h2i
@ -0,0 +1,51 @@
|
||||
#
|
||||
# This Dockerfile builds a recent curl with HTTP/2 client support, using
|
||||
# a recent nghttp2 build.
|
||||
#
|
||||
# See the Makefile for how to tag it. If Docker and that image is found, the
|
||||
# Go tests use this curl binary for integration tests.
|
||||
#
|
||||
|
||||
FROM ubuntu:trusty
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get upgrade -y && \
|
||||
apt-get install -y git-core build-essential wget
|
||||
|
||||
RUN apt-get install -y --no-install-recommends \
|
||||
autotools-dev libtool pkg-config zlib1g-dev \
|
||||
libcunit1-dev libssl-dev libxml2-dev libevent-dev \
|
||||
automake autoconf
|
||||
|
||||
# The list of packages nghttp2 recommends for h2load:
|
||||
RUN apt-get install -y --no-install-recommends make binutils \
|
||||
autoconf automake autotools-dev \
|
||||
libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
|
||||
libev-dev libevent-dev libjansson-dev libjemalloc-dev \
|
||||
cython python3.4-dev python-setuptools
|
||||
|
||||
# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
|
||||
ENV NGHTTP2_VER 895da9a
|
||||
RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
|
||||
|
||||
WORKDIR /root/nghttp2
|
||||
RUN git reset --hard $NGHTTP2_VER
|
||||
RUN autoreconf -i
|
||||
RUN automake
|
||||
RUN autoconf
|
||||
RUN ./configure
|
||||
RUN make
|
||||
RUN make install
|
||||
|
||||
WORKDIR /root
|
||||
RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz
|
||||
RUN tar -zxvf curl-7.45.0.tar.gz
|
||||
WORKDIR /root/curl-7.45.0
|
||||
RUN ./configure --with-ssl --with-nghttp2=/usr/local
|
||||
RUN make
|
||||
RUN make install
|
||||
RUN ldconfig
|
||||
|
||||
CMD ["-h"]
|
||||
ENTRYPOINT ["/usr/local/bin/curl"]
|
||||
|
@ -0,0 +1,3 @@
curlimage:
	docker build -t gohttp2/curl .
@ -0,0 +1,20 @@
This is a work-in-progress HTTP/2 implementation for Go.

It will eventually live in the Go standard library and won't require
any changes to your code to use. It will just be automatic.

Status:

* The server support is pretty good. A few things are missing
  but are being worked on.
* The client work has just started but, because it shares a lot of code
  with the server, is coming along much quicker.

Docs are at https://godoc.org/golang.org/x/net/http2

Demo test server at https://http2.golang.org/

Help & bug reports welcome!

Contributing: https://golang.org/doc/contribute.html
Bugs:         https://golang.org/issue/new?title=x/net/http2:+
@ -0,0 +1,641 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
// A list of the possible cipher suite ids. Taken from
|
||||
// https://www.iana.org/assignments/tls-parameters/tls-parameters.txt
|
||||
|
||||
const (
|
||||
cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000
|
||||
cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001
|
||||
cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002
|
||||
cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003
|
||||
cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004
|
||||
cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
|
||||
cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006
|
||||
cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007
|
||||
cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008
|
||||
cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009
|
||||
cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A
|
||||
cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B
|
||||
cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C
|
||||
cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D
|
||||
cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E
|
||||
cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F
|
||||
cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010
|
||||
cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011
|
||||
cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012
|
||||
cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013
|
||||
cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014
|
||||
cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015
|
||||
cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016
|
||||
cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017
|
||||
cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018
|
||||
cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019
|
||||
cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A
|
||||
cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B
|
||||
// Reserved uint16 = 0x001C-1D
|
||||
cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E
|
||||
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F
|
||||
cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020
|
||||
cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021
|
||||
cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022
|
||||
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023
|
||||
cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024
|
||||
cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025
|
||||
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026
|
||||
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027
|
||||
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028
|
||||
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029
|
||||
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A
|
||||
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B
|
||||
cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C
|
||||
cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D
|
||||
cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E
|
||||
cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F
|
||||
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030
|
||||
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031
|
||||
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032
|
||||
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033
|
||||
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034
|
||||
cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035
|
||||
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036
|
||||
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037
|
||||
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038
|
||||
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039
|
||||
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A
|
||||
cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B
|
||||
cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C
|
||||
cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D
|
||||
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E
|
||||
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F
|
||||
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046
|
||||
// Reserved uint16 = 0x0047-4F
|
||||
// Reserved uint16 = 0x0050-58
|
||||
// Reserved uint16 = 0x0059-5C
|
||||
// Unassigned uint16 = 0x005D-5F
|
||||
// Reserved uint16 = 0x0060-66
|
||||
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067
|
||||
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068
|
||||
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069
|
||||
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A
|
||||
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B
|
||||
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C
|
||||
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D
|
||||
// Unassigned uint16 = 0x006E-83
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089
|
||||
cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A
|
||||
cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B
|
||||
cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C
|
||||
cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D
|
||||
cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E
|
||||
cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F
|
||||
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090
|
||||
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091
|
||||
cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092
|
||||
cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093
|
||||
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094
|
||||
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095
|
||||
cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096
|
||||
cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097
|
||||
cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098
|
||||
cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099
|
||||
cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A
|
||||
cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B
|
||||
cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C
|
||||
cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D
|
||||
cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E
|
||||
cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F
|
||||
cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0
|
||||
cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1
|
||||
cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2
|
||||
cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3
|
||||
cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4
|
||||
cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5
|
||||
cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6
|
||||
cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7
|
||||
cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8
|
||||
cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9
|
||||
cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA
|
||||
cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB
|
||||
cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC
|
||||
cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD
|
||||
cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE
|
||||
cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF
|
||||
cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0
|
||||
cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1
|
||||
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2
|
||||
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3
|
||||
cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4
|
||||
cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5
|
||||
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6
|
||||
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7
|
||||
cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8
|
||||
cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5
|
||||
// Unassigned uint16 = 0x00C6-FE
|
||||
cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF
|
||||
// Unassigned uint16 = 0x01-55,*
|
||||
cipher_TLS_FALLBACK_SCSV uint16 = 0x5600
|
||||
// Unassigned uint16 = 0x5601 - 0xC000
|
||||
cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001
|
||||
cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002
|
||||
cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A
|
||||
cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B
|
||||
cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C
|
||||
cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F
|
||||
cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010
|
||||
cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011
|
||||
cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014
|
||||
cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015
|
||||
cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016
|
||||
cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017
|
||||
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018
|
||||
cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019
|
||||
cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A
|
||||
cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B
|
||||
cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C
|
||||
cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D
|
||||
cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E
|
||||
cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F
|
||||
cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020
|
||||
cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021
|
||||
cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032
|
||||
cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033
|
||||
cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034
|
||||
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035
|
||||
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036
|
||||
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037
|
||||
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038
|
||||
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039
|
||||
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A
|
||||
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B
|
||||
cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C
|
||||
cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D
|
||||
cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E
|
||||
cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F
|
||||
cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040
|
||||
cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041
|
||||
cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042
|
||||
cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043
|
||||
cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044
|
||||
cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045
|
||||
cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046
|
||||
cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049
|
||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A
|
||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B
|
||||
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C
|
||||
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D
|
||||
cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E
|
||||
cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F
|
||||
cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050
|
||||
cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051
|
||||
cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052
|
||||
cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053
|
||||
cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054
|
||||
cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055
|
||||
cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056
|
||||
cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057
|
||||
cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058
|
||||
cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059
|
||||
cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A
|
||||
cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D
|
||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E
|
||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F
|
||||
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060
|
||||
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061
|
||||
cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062
|
||||
cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063
|
||||
cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064
|
||||
cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065
|
||||
cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066
|
||||
cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067
|
||||
cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068
|
||||
cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069
|
||||
cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A
|
||||
cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B
|
||||
cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C
|
||||
cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D
|
||||
cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E
|
||||
cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F
|
||||
cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070
|
||||
cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073
|
||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074
|
||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075
|
||||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076
|
||||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077
|
||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078
|
||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087
|
||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088
|
||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089
|
||||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A
|
||||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B
|
||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C
|
||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D
|
||||
cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E
|
||||
cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F
|
||||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090
|
||||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091
|
||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092
|
||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093
|
||||
cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094
|
||||
cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095
|
||||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096
|
||||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097
|
||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098
|
||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099
|
||||
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A
|
||||
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B
|
||||
cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C
|
||||
cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D
|
||||
cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E
|
||||
cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F
|
||||
cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0
|
||||
cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1
|
||||
cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2
|
||||
cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3
|
||||
cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4
|
||||
cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5
|
||||
cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6
|
||||
cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7
|
||||
cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8
|
||||
cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9
|
||||
cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA
|
||||
cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF
|
||||
// Unassigned uint16 = 0xC0B0-FF
|
||||
// Unassigned uint16 = 0xC1-CB,*
|
||||
// Unassigned uint16 = 0xCC00-A7
|
||||
cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9
|
||||
cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA
|
||||
cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB
|
||||
cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC
|
||||
cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD
|
||||
cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE
|
||||
)
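
// Illustrative sketch (assumption, not upstream API): isBadCipher, defined
// below, reports whether a suite appears on the RFC 7540 Appendix A black
// list. An ephemeral ECDHE/AES-GCM suite is acceptable for HTTP/2, while a
// static-RSA CBC suite is rejected.
func exampleCipherChecks() (gcmBad, cbcBad bool) {
	gcmBad = isBadCipher(cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) // false
	cbcBad = isBadCipher(cipher_TLS_RSA_WITH_AES_128_CBC_SHA)          // true
	return gcmBad, cbcBad
}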
|
||||
|
||||
// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
|
||||
// References:
|
||||
// https://tools.ietf.org/html/rfc7540#appendix-A
|
||||
// Reject cipher suites from Appendix A.
|
||||
// "This list includes those cipher suites that do not
|
||||
// offer an ephemeral key exchange and those that are
|
||||
// based on the TLS null, stream or block cipher type"
|
||||
func isBadCipher(cipher uint16) bool {
|
||||
switch cipher {
|
||||
case cipher_TLS_NULL_WITH_NULL_NULL,
|
||||
cipher_TLS_RSA_WITH_NULL_MD5,
|
||||
cipher_TLS_RSA_WITH_NULL_SHA,
|
||||
cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5,
|
||||
cipher_TLS_RSA_WITH_RC4_128_MD5,
|
||||
cipher_TLS_RSA_WITH_RC4_128_SHA,
|
||||
cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
|
||||
cipher_TLS_RSA_WITH_IDEA_CBC_SHA,
|
||||
cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA,
|
||||
cipher_TLS_RSA_WITH_DES_CBC_SHA,
|
||||
cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA,
|
||||
cipher_TLS_DH_DSS_WITH_DES_CBC_SHA,
|
||||
cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA,
|
||||
cipher_TLS_DH_RSA_WITH_DES_CBC_SHA,
|
||||
cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA,
|
||||
cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA,
|
||||
cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
|
||||
cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA,
|
||||
cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5,
|
||||
cipher_TLS_DH_anon_WITH_RC4_128_MD5,
|
||||
cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA,
|
||||
cipher_TLS_DH_anon_WITH_DES_CBC_SHA,
|
||||
cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_KRB5_WITH_DES_CBC_SHA,
|
||||
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_KRB5_WITH_RC4_128_SHA,
|
||||
cipher_TLS_KRB5_WITH_IDEA_CBC_SHA,
|
||||
cipher_TLS_KRB5_WITH_DES_CBC_MD5,
|
||||
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5,
|
||||
cipher_TLS_KRB5_WITH_RC4_128_MD5,
|
||||
cipher_TLS_KRB5_WITH_IDEA_CBC_MD5,
|
||||
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA,
|
||||
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA,
|
||||
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA,
|
||||
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5,
|
||||
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5,
|
||||
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5,
|
||||
cipher_TLS_PSK_WITH_NULL_SHA,
|
||||
cipher_TLS_DHE_PSK_WITH_NULL_SHA,
|
||||
cipher_TLS_RSA_PSK_WITH_NULL_SHA,
|
||||
cipher_TLS_RSA_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_RSA_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_RSA_WITH_NULL_SHA256,
|
||||
cipher_TLS_RSA_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_RSA_WITH_AES_256_CBC_SHA256,
|
||||
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA,
|
||||
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256,
|
||||
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256,
|
||||
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
|
||||
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
|
||||
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256,
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA,
|
||||
cipher_TLS_PSK_WITH_RC4_128_SHA,
|
||||
cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_PSK_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_PSK_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_DHE_PSK_WITH_RC4_128_SHA,
|
||||
cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_RSA_PSK_WITH_RC4_128_SHA,
|
||||
cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_RSA_WITH_SEED_CBC_SHA,
|
||||
cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA,
|
||||
cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA,
|
||||
cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA,
|
||||
cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA,
|
||||
cipher_TLS_DH_anon_WITH_SEED_CBC_SHA,
|
||||
cipher_TLS_RSA_WITH_AES_128_GCM_SHA256,
|
||||
cipher_TLS_RSA_WITH_AES_256_GCM_SHA384,
|
||||
cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256,
|
||||
cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384,
|
||||
cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256,
|
||||
cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384,
|
||||
cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256,
|
||||
cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384,
|
||||
cipher_TLS_PSK_WITH_AES_128_GCM_SHA256,
|
||||
cipher_TLS_PSK_WITH_AES_256_GCM_SHA384,
|
||||
cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256,
|
||||
cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384,
|
||||
cipher_TLS_PSK_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_PSK_WITH_AES_256_CBC_SHA384,
|
||||
cipher_TLS_PSK_WITH_NULL_SHA256,
|
||||
cipher_TLS_PSK_WITH_NULL_SHA384,
|
||||
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384,
|
||||
cipher_TLS_DHE_PSK_WITH_NULL_SHA256,
|
||||
cipher_TLS_DHE_PSK_WITH_NULL_SHA384,
|
||||
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384,
|
||||
cipher_TLS_RSA_PSK_WITH_NULL_SHA256,
|
||||
cipher_TLS_RSA_PSK_WITH_NULL_SHA384,
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256,
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256,
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256,
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256,
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256,
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256,
|
||||
cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_ECDH_RSA_WITH_NULL_SHA,
|
||||
cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA,
|
||||
cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_ECDHE_RSA_WITH_NULL_SHA,
|
||||
cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA,
|
||||
cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_ECDH_anon_WITH_NULL_SHA,
|
||||
cipher_TLS_ECDH_anon_WITH_RC4_128_SHA,
|
||||
cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384,
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,
|
||||
cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA,
|
||||
cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384,
|
||||
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA,
|
||||
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256,
|
||||
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384,
|
||||
cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256,
|
||||
cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384,
|
||||
cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256,
|
||||
cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384,
|
||||
cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256,
|
||||
cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384,
|
||||
cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256,
|
||||
cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384,
|
||||
cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256,
|
||||
cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384,
|
||||
cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256,
|
||||
cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384,
|
||||
cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256,
|
||||
cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384,
|
||||
cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
|
||||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384,
|
||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384,
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256,
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384,
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256,
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384,
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256,
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384,
|
||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
|
||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
|
||||
cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256,
|
||||
cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384,
|
||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256,
|
||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384,
|
||||
cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384,
|
||||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
|
||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384,
|
||||
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
|
||||
cipher_TLS_RSA_WITH_AES_128_CCM,
|
||||
cipher_TLS_RSA_WITH_AES_256_CCM,
|
||||
cipher_TLS_RSA_WITH_AES_128_CCM_8,
|
||||
cipher_TLS_RSA_WITH_AES_256_CCM_8,
|
||||
cipher_TLS_PSK_WITH_AES_128_CCM,
|
||||
cipher_TLS_PSK_WITH_AES_256_CCM,
|
||||
cipher_TLS_PSK_WITH_AES_128_CCM_8,
|
||||
cipher_TLS_PSK_WITH_AES_256_CCM_8:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
@ -0,0 +1,282 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Transport code's client connection pooling.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net/http"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// ClientConnPool manages a pool of HTTP/2 client connections.
|
||||
type ClientConnPool interface {
|
||||
GetClientConn(req *http.Request, addr string) (*ClientConn, error)
|
||||
MarkDead(*ClientConn)
|
||||
}
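
A minimal sketch of how a caller could supply its own pool: it assumes the Transport's exported ConnPool field and invents a loggingPool wrapper purely for illustration (the embedded interface supplies MarkDead, so only GetClientConn is overridden).

package main

import (
    "log"
    "net/http"

    "golang.org/x/net/http2"
)

// loggingPool wraps any ClientConnPool and logs each lookup.
type loggingPool struct {
    http2.ClientConnPool
}

func (p loggingPool) GetClientConn(req *http.Request, addr string) (*http2.ClientConn, error) {
    log.Printf("http2: looking up client conn for %s", addr)
    return p.ClientConnPool.GetClientConn(req, addr)
}

func main() {
    t := &http2.Transport{}
    // Hypothetical: wrap whatever pool the caller already manages.
    // t.ConnPool = loggingPool{ClientConnPool: basePool}
    _ = &http.Client{Transport: t}
}
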
|
||||
|
||||
// clientConnPoolIdleCloser is the interface implemented by ClientConnPool
|
||||
// implementations which can close their idle connections.
|
||||
type clientConnPoolIdleCloser interface {
|
||||
ClientConnPool
|
||||
closeIdleConnections()
|
||||
}
|
||||
|
||||
var (
|
||||
_ clientConnPoolIdleCloser = (*clientConnPool)(nil)
|
||||
_ clientConnPoolIdleCloser = noDialClientConnPool{}
|
||||
)
|
||||
|
||||
// TODO: use singleflight for dialing and addConnCalls?
|
||||
type clientConnPool struct {
|
||||
t *Transport
|
||||
|
||||
mu sync.Mutex // TODO: maybe switch to RWMutex
|
||||
// TODO: add support for sharing conns based on cert names
|
||||
// (e.g. share conn for googleapis.com and appspot.com)
|
||||
conns map[string][]*ClientConn // key is host:port
|
||||
dialing map[string]*dialCall // currently in-flight dials
|
||||
keys map[*ClientConn][]string
|
||||
addConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls
|
||||
}
|
||||
|
||||
func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
|
||||
return p.getClientConn(req, addr, dialOnMiss)
|
||||
}
|
||||
|
||||
const (
|
||||
dialOnMiss = true
|
||||
noDialOnMiss = false
|
||||
)
|
||||
|
||||
// shouldTraceGetConn reports whether getClientConn should call any
|
||||
// ClientTrace.GetConn hook associated with the http.Request.
|
||||
//
|
||||
// This complexity is needed to avoid double calls of the GetConn hook
|
||||
// during the back-and-forth between net/http and x/net/http2 (when the
|
||||
// net/http.Transport is upgraded to also speak http2), as well as support
|
||||
// the case where x/net/http2 is being used directly.
|
||||
func (p *clientConnPool) shouldTraceGetConn(st clientConnIdleState) bool {
|
||||
// If our Transport wasn't made via ConfigureTransport, always
|
||||
// trace the GetConn hook if provided, because that means the
|
||||
// http2 package is being used directly and it's the one
|
||||
// dialing, as opposed to net/http.
|
||||
if _, ok := p.t.ConnPool.(noDialClientConnPool); !ok {
|
||||
return true
|
||||
}
|
||||
// Otherwise, only use the GetConn hook if this connection has
|
||||
// been used previously for other requests. For fresh
|
||||
// connections, the net/http package does the dialing.
|
||||
return !st.freshConn
|
||||
}
|
||||
|
||||
func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
|
||||
if isConnectionCloseRequest(req) && dialOnMiss {
|
||||
// It gets its own connection.
|
||||
traceGetConn(req, addr)
|
||||
const singleUse = true
|
||||
cc, err := p.t.dialClientConn(addr, singleUse)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cc, nil
|
||||
}
|
||||
p.mu.Lock()
|
||||
for _, cc := range p.conns[addr] {
|
||||
if st := cc.idleState(); st.canTakeNewRequest {
|
||||
if p.shouldTraceGetConn(st) {
|
||||
traceGetConn(req, addr)
|
||||
}
|
||||
p.mu.Unlock()
|
||||
return cc, nil
|
||||
}
|
||||
}
|
||||
if !dialOnMiss {
|
||||
p.mu.Unlock()
|
||||
return nil, ErrNoCachedConn
|
||||
}
|
||||
traceGetConn(req, addr)
|
||||
call := p.getStartDialLocked(addr)
|
||||
p.mu.Unlock()
|
||||
<-call.done
|
||||
return call.res, call.err
|
||||
}
|
||||
|
||||
// dialCall is an in-flight Transport dial call to a host.
|
||||
type dialCall struct {
|
||||
p *clientConnPool
|
||||
done chan struct{} // closed when done
|
||||
res *ClientConn // valid after done is closed
|
||||
err error // valid after done is closed
|
||||
}
|
||||
|
||||
// requires p.mu is held.
|
||||
func (p *clientConnPool) getStartDialLocked(addr string) *dialCall {
|
||||
if call, ok := p.dialing[addr]; ok {
|
||||
// A dial is already in-flight. Don't start another.
|
||||
return call
|
||||
}
|
||||
call := &dialCall{p: p, done: make(chan struct{})}
|
||||
if p.dialing == nil {
|
||||
p.dialing = make(map[string]*dialCall)
|
||||
}
|
||||
p.dialing[addr] = call
|
||||
go call.dial(addr)
|
||||
return call
|
||||
}
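
The TODO above asks whether this hand-rolled dialing map could be replaced by golang.org/x/sync/singleflight; a rough sketch of that substitution, with dialAddr as a hypothetical stand-in for the pool's real dial, might look like this.

package main

import (
    "fmt"

    "golang.org/x/sync/singleflight"
)

var dialGroup singleflight.Group

// dialAddr stands in for dialClientConn: only one call per address key runs
// at a time, and concurrent callers share its result.
func dialAddr(addr string) (string, error) {
    return "conn to " + addr, nil
}

func getConn(addr string) (string, error) {
    v, err, shared := dialGroup.Do(addr, func() (interface{}, error) {
        c, err := dialAddr(addr)
        return c, err
    })
    if err != nil {
        return "", err
    }
    fmt.Println("result shared with another caller:", shared)
    return v.(string), nil
}

func main() {
    c, _ := getConn("example.com:443")
    fmt.Println(c)
}
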
|
||||
|
||||
// run in its own goroutine.
|
||||
func (c *dialCall) dial(addr string) {
|
||||
const singleUse = false // shared conn
|
||||
c.res, c.err = c.p.t.dialClientConn(addr, singleUse)
|
||||
close(c.done)
|
||||
|
||||
c.p.mu.Lock()
|
||||
delete(c.p.dialing, addr)
|
||||
if c.err == nil {
|
||||
c.p.addConnLocked(addr, c.res)
|
||||
}
|
||||
c.p.mu.Unlock()
|
||||
}
|
||||
|
||||
// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't
|
||||
// already exist. It coalesces concurrent calls with the same key.
|
||||
// This is used by the http1 Transport code when it creates a new connection. Because
|
||||
// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know
|
||||
// the protocol), it can get into a situation where it has multiple TLS connections.
|
||||
// This code decides which ones live or die.
|
||||
// The return value, used, reports whether c was used.
|
||||
// c is never closed.
|
||||
func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) {
|
||||
p.mu.Lock()
|
||||
for _, cc := range p.conns[key] {
|
||||
if cc.CanTakeNewRequest() {
|
||||
p.mu.Unlock()
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
call, dup := p.addConnCalls[key]
|
||||
if !dup {
|
||||
if p.addConnCalls == nil {
|
||||
p.addConnCalls = make(map[string]*addConnCall)
|
||||
}
|
||||
call = &addConnCall{
|
||||
p: p,
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
p.addConnCalls[key] = call
|
||||
go call.run(t, key, c)
|
||||
}
|
||||
p.mu.Unlock()
|
||||
|
||||
<-call.done
|
||||
if call.err != nil {
|
||||
return false, call.err
|
||||
}
|
||||
return !dup, nil
|
||||
}
|
||||
|
||||
type addConnCall struct {
|
||||
p *clientConnPool
|
||||
done chan struct{} // closed when done
|
||||
err error
|
||||
}
|
||||
|
||||
func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
|
||||
cc, err := t.NewClientConn(tc)
|
||||
|
||||
p := c.p
|
||||
p.mu.Lock()
|
||||
if err != nil {
|
||||
c.err = err
|
||||
} else {
|
||||
p.addConnLocked(key, cc)
|
||||
}
|
||||
delete(p.addConnCalls, key)
|
||||
p.mu.Unlock()
|
||||
close(c.done)
|
||||
}
|
||||
|
||||
func (p *clientConnPool) addConn(key string, cc *ClientConn) {
|
||||
p.mu.Lock()
|
||||
p.addConnLocked(key, cc)
|
||||
p.mu.Unlock()
|
||||
}
|
||||
|
||||
// p.mu must be held
|
||||
func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) {
|
||||
for _, v := range p.conns[key] {
|
||||
if v == cc {
|
||||
return
|
||||
}
|
||||
}
|
||||
if p.conns == nil {
|
||||
p.conns = make(map[string][]*ClientConn)
|
||||
}
|
||||
if p.keys == nil {
|
||||
p.keys = make(map[*ClientConn][]string)
|
||||
}
|
||||
p.conns[key] = append(p.conns[key], cc)
|
||||
p.keys[cc] = append(p.keys[cc], key)
|
||||
}
|
||||
|
||||
func (p *clientConnPool) MarkDead(cc *ClientConn) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
for _, key := range p.keys[cc] {
|
||||
vv, ok := p.conns[key]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
newList := filterOutClientConn(vv, cc)
|
||||
if len(newList) > 0 {
|
||||
p.conns[key] = newList
|
||||
} else {
|
||||
delete(p.conns, key)
|
||||
}
|
||||
}
|
||||
delete(p.keys, cc)
|
||||
}
|
||||
|
||||
func (p *clientConnPool) closeIdleConnections() {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
// TODO: don't close a cc if it was just added to the pool
|
||||
// milliseconds ago and has never been used. There's currently
|
||||
// a small race window with the HTTP/1 Transport's integration
|
||||
// where it can add an idle conn just before using it, and
|
||||
// somebody else can concurrently call CloseIdleConns and
|
||||
// break some caller's RoundTrip.
|
||||
for _, vv := range p.conns {
|
||||
for _, cc := range vv {
|
||||
cc.closeIfIdle()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn {
|
||||
out := in[:0]
|
||||
for _, v := range in {
|
||||
if v != exclude {
|
||||
out = append(out, v)
|
||||
}
|
||||
}
|
||||
// If we filtered the excluded conn out, zero the now-stale last slot of
// the backing array so the removed *ClientConn can be garbage collected.
|
||||
if len(in) != len(out) {
|
||||
in[len(in)-1] = nil
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// noDialClientConnPool is an implementation of http2.ClientConnPool
|
||||
// which never dials. We let the HTTP/1.1 client dial and use its TLS
|
||||
// connection instead.
|
||||
type noDialClientConnPool struct{ *clientConnPool }
|
||||
|
||||
func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
|
||||
return p.getClientConn(req, addr, noDialOnMiss)
|
||||
}
|
@ -0,0 +1,146 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Buffer chunks are allocated from a pool to reduce pressure on GC.
|
||||
// The maximum wasted space per dataBuffer is 2x the largest size class,
|
||||
// which happens when the dataBuffer has multiple chunks and there is
|
||||
// one unread byte in both the first and last chunks. We use a few size
|
||||
// classes to minimize overheads for servers that typically receive very
|
||||
// small request bodies.
|
||||
//
|
||||
// TODO: Benchmark to determine if the pools are necessary. The GC may have
|
||||
// improved enough that we can instead allocate chunks like this:
|
||||
// make([]byte, max(16<<10, expectedBytesRemaining))
|
||||
var (
|
||||
dataChunkSizeClasses = []int{
|
||||
1 << 10,
|
||||
2 << 10,
|
||||
4 << 10,
|
||||
8 << 10,
|
||||
16 << 10,
|
||||
}
|
||||
dataChunkPools = [...]sync.Pool{
|
||||
{New: func() interface{} { return make([]byte, 1<<10) }},
|
||||
{New: func() interface{} { return make([]byte, 2<<10) }},
|
||||
{New: func() interface{} { return make([]byte, 4<<10) }},
|
||||
{New: func() interface{} { return make([]byte, 8<<10) }},
|
||||
{New: func() interface{} { return make([]byte, 16<<10) }},
|
||||
}
|
||||
)
|
||||
|
||||
func getDataBufferChunk(size int64) []byte {
|
||||
i := 0
|
||||
for ; i < len(dataChunkSizeClasses)-1; i++ {
|
||||
if size <= int64(dataChunkSizeClasses[i]) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return dataChunkPools[i].Get().([]byte)
|
||||
}
|
||||
|
||||
func putDataBufferChunk(p []byte) {
|
||||
for i, n := range dataChunkSizeClasses {
|
||||
if len(p) == n {
|
||||
dataChunkPools[i].Put(p)
|
||||
return
|
||||
}
|
||||
}
|
||||
panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
|
||||
}
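
A standalone restatement of the size-class selection above (the real helpers are unexported): the loop takes the first class that fits, and anything larger than the biggest class still gets a 16 KiB chunk. The names below are illustrative only.

package main

import "fmt"

var sizeClasses = []int{1 << 10, 2 << 10, 4 << 10, 8 << 10, 16 << 10}

// pickClass mirrors getDataBufferChunk's selection loop.
func pickClass(size int64) int {
    i := 0
    for ; i < len(sizeClasses)-1; i++ {
        if size <= int64(sizeClasses[i]) {
            break
        }
    }
    return sizeClasses[i]
}

func main() {
    fmt.Println(pickClass(300))   // 1024
    fmt.Println(pickClass(3000))  // 4096
    fmt.Println(pickClass(64000)) // 16384: capped at the largest class
}
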
|
||||
|
||||
// dataBuffer is an io.ReadWriter backed by a list of data chunks.
|
||||
// Each dataBuffer is used to read DATA frames on a single stream.
|
||||
// The buffer is divided into chunks so the server can limit the
|
||||
// total memory used by a single connection without limiting the
|
||||
// request body size on any single stream.
|
||||
type dataBuffer struct {
|
||||
chunks [][]byte
|
||||
r int // next byte to read is chunks[0][r]
|
||||
w int // next byte to write is chunks[len(chunks)-1][w]
|
||||
size int // total buffered bytes
|
||||
expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0)
|
||||
}
|
||||
|
||||
var errReadEmpty = errors.New("read from empty dataBuffer")
|
||||
|
||||
// Read copies bytes from the buffer into p.
|
||||
// It is an error to read when no data is available.
|
||||
func (b *dataBuffer) Read(p []byte) (int, error) {
|
||||
if b.size == 0 {
|
||||
return 0, errReadEmpty
|
||||
}
|
||||
var ntotal int
|
||||
for len(p) > 0 && b.size > 0 {
|
||||
readFrom := b.bytesFromFirstChunk()
|
||||
n := copy(p, readFrom)
|
||||
p = p[n:]
|
||||
ntotal += n
|
||||
b.r += n
|
||||
b.size -= n
|
||||
// If the first chunk has been consumed, advance to the next chunk.
|
||||
if b.r == len(b.chunks[0]) {
|
||||
putDataBufferChunk(b.chunks[0])
|
||||
end := len(b.chunks) - 1
|
||||
copy(b.chunks[:end], b.chunks[1:])
|
||||
b.chunks[end] = nil
|
||||
b.chunks = b.chunks[:end]
|
||||
b.r = 0
|
||||
}
|
||||
}
|
||||
return ntotal, nil
|
||||
}
|
||||
|
||||
func (b *dataBuffer) bytesFromFirstChunk() []byte {
|
||||
if len(b.chunks) == 1 {
|
||||
return b.chunks[0][b.r:b.w]
|
||||
}
|
||||
return b.chunks[0][b.r:]
|
||||
}
|
||||
|
||||
// Len returns the number of bytes of the unread portion of the buffer.
|
||||
func (b *dataBuffer) Len() int {
|
||||
return b.size
|
||||
}
|
||||
|
||||
// Write appends p to the buffer.
|
||||
func (b *dataBuffer) Write(p []byte) (int, error) {
|
||||
ntotal := len(p)
|
||||
for len(p) > 0 {
|
||||
// If the last chunk is empty, allocate a new chunk. Try to allocate
|
||||
// enough to fully copy p plus any additional bytes we expect to
|
||||
// receive. However, this may allocate less than len(p).
|
||||
want := int64(len(p))
|
||||
if b.expected > want {
|
||||
want = b.expected
|
||||
}
|
||||
chunk := b.lastChunkOrAlloc(want)
|
||||
n := copy(chunk[b.w:], p)
|
||||
p = p[n:]
|
||||
b.w += n
|
||||
b.size += n
|
||||
b.expected -= int64(n)
|
||||
}
|
||||
return ntotal, nil
|
||||
}
|
||||
|
||||
func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte {
|
||||
if len(b.chunks) != 0 {
|
||||
last := b.chunks[len(b.chunks)-1]
|
||||
if b.w < len(last) {
|
||||
return last
|
||||
}
|
||||
}
|
||||
chunk := getDataBufferChunk(want)
|
||||
b.chunks = append(b.chunks, chunk)
|
||||
b.w = 0
|
||||
return chunk
|
||||
}
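
Since dataBuffer is unexported, a usage sketch has to live inside package http2 (for instance in a test file); the hypothetical test below shows the expected hint steering the first allocation and Read draining the buffer until it reports errReadEmpty.

package http2

import "testing"

func TestDataBufferSketch(t *testing.T) {
    var b dataBuffer
    b.expected = 1 << 10 // hint: at least 1 KiB more is expected

    if _, err := b.Write([]byte("hello, window")); err != nil {
        t.Fatal(err)
    }
    out := make([]byte, 32)
    n, err := b.Read(out)
    if err != nil || string(out[:n]) != "hello, window" {
        t.Fatalf("got %q, %v", out[:n], err)
    }
    // A second Read on the drained buffer reports errReadEmpty.
    if _, err := b.Read(out); err != errReadEmpty {
        t.Fatalf("want errReadEmpty, got %v", err)
    }
}
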
|
@ -0,0 +1,133 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
|
||||
type ErrCode uint32
|
||||
|
||||
const (
|
||||
ErrCodeNo ErrCode = 0x0
|
||||
ErrCodeProtocol ErrCode = 0x1
|
||||
ErrCodeInternal ErrCode = 0x2
|
||||
ErrCodeFlowControl ErrCode = 0x3
|
||||
ErrCodeSettingsTimeout ErrCode = 0x4
|
||||
ErrCodeStreamClosed ErrCode = 0x5
|
||||
ErrCodeFrameSize ErrCode = 0x6
|
||||
ErrCodeRefusedStream ErrCode = 0x7
|
||||
ErrCodeCancel ErrCode = 0x8
|
||||
ErrCodeCompression ErrCode = 0x9
|
||||
ErrCodeConnect ErrCode = 0xa
|
||||
ErrCodeEnhanceYourCalm ErrCode = 0xb
|
||||
ErrCodeInadequateSecurity ErrCode = 0xc
|
||||
ErrCodeHTTP11Required ErrCode = 0xd
|
||||
)
|
||||
|
||||
var errCodeName = map[ErrCode]string{
|
||||
ErrCodeNo: "NO_ERROR",
|
||||
ErrCodeProtocol: "PROTOCOL_ERROR",
|
||||
ErrCodeInternal: "INTERNAL_ERROR",
|
||||
ErrCodeFlowControl: "FLOW_CONTROL_ERROR",
|
||||
ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT",
|
||||
ErrCodeStreamClosed: "STREAM_CLOSED",
|
||||
ErrCodeFrameSize: "FRAME_SIZE_ERROR",
|
||||
ErrCodeRefusedStream: "REFUSED_STREAM",
|
||||
ErrCodeCancel: "CANCEL",
|
||||
ErrCodeCompression: "COMPRESSION_ERROR",
|
||||
ErrCodeConnect: "CONNECT_ERROR",
|
||||
ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM",
|
||||
ErrCodeInadequateSecurity: "INADEQUATE_SECURITY",
|
||||
ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED",
|
||||
}
|
||||
|
||||
func (e ErrCode) String() string {
|
||||
if s, ok := errCodeName[e]; ok {
|
||||
return s
|
||||
}
|
||||
return fmt.Sprintf("unknown error code 0x%x", uint32(e))
|
||||
}
|
||||
|
||||
// ConnectionError is an error that results in the termination of the
|
||||
// entire connection.
|
||||
type ConnectionError ErrCode
|
||||
|
||||
func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) }
|
||||
|
||||
// StreamError is an error that only affects one stream within an
|
||||
// HTTP/2 connection.
|
||||
type StreamError struct {
|
||||
StreamID uint32
|
||||
Code ErrCode
|
||||
Cause error // optional additional detail
|
||||
}
|
||||
|
||||
func streamError(id uint32, code ErrCode) StreamError {
|
||||
return StreamError{StreamID: id, Code: code}
|
||||
}
|
||||
|
||||
func (e StreamError) Error() string {
|
||||
if e.Cause != nil {
|
||||
return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause)
|
||||
}
|
||||
return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
|
||||
}
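
ErrCode, ConnectionError, and StreamError are all exported, so their formatting can be exercised directly; a small sketch, assuming the package is imported from golang.org/x/net/http2 as vendored here:

package main

import (
    "fmt"

    "golang.org/x/net/http2"
)

func main() {
    fmt.Println(http2.ErrCodeFlowControl) // FLOW_CONTROL_ERROR
    fmt.Println(http2.ErrCode(0x99))      // unknown error code 0x99

    var err error = http2.ConnectionError(http2.ErrCodeProtocol)
    fmt.Println(err) // connection error: PROTOCOL_ERROR

    err = http2.StreamError{StreamID: 3, Code: http2.ErrCodeRefusedStream}
    fmt.Println(err) // stream error: stream ID 3; REFUSED_STREAM
}
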
|
||||
|
||||
// 6.9.1 The Flow Control Window
|
||||
// "If a sender receives a WINDOW_UPDATE that causes a flow control
|
||||
// window to exceed this maximum it MUST terminate either the stream
|
||||
// or the connection, as appropriate. For streams, [...]; for the
|
||||
// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code."
|
||||
type goAwayFlowError struct{}
|
||||
|
||||
func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }
|
||||
|
||||
// connError represents an HTTP/2 ConnectionError error code, along
|
||||
// with a string (for debugging) explaining why.
|
||||
//
|
||||
// Errors of this type are only returned by the frame parser functions
|
||||
// and converted into ConnectionError(Code), after stashing away
|
||||
// the Reason into the Framer's errDetail field, accessible via
|
||||
// the (*Framer).ErrorDetail method.
|
||||
type connError struct {
|
||||
Code ErrCode // the ConnectionError error code
|
||||
Reason string // additional reason
|
||||
}
|
||||
|
||||
func (e connError) Error() string {
|
||||
return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason)
|
||||
}
|
||||
|
||||
type pseudoHeaderError string
|
||||
|
||||
func (e pseudoHeaderError) Error() string {
|
||||
return fmt.Sprintf("invalid pseudo-header %q", string(e))
|
||||
}
|
||||
|
||||
type duplicatePseudoHeaderError string
|
||||
|
||||
func (e duplicatePseudoHeaderError) Error() string {
|
||||
return fmt.Sprintf("duplicate pseudo-header %q", string(e))
|
||||
}
|
||||
|
||||
type headerFieldNameError string
|
||||
|
||||
func (e headerFieldNameError) Error() string {
|
||||
return fmt.Sprintf("invalid header field name %q", string(e))
|
||||
}
|
||||
|
||||
type headerFieldValueError string
|
||||
|
||||
func (e headerFieldValueError) Error() string {
|
||||
return fmt.Sprintf("invalid header field value %q", string(e))
|
||||
}
|
||||
|
||||
var (
|
||||
errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers")
|
||||
errPseudoAfterRegular = errors.New("pseudo header field after regular")
|
||||
)
|
@ -0,0 +1,50 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Flow control
|
||||
|
||||
package http2
|
||||
|
||||
// flow is the flow control window's size.
|
||||
type flow struct {
|
||||
// n is the number of DATA bytes we're allowed to send.
|
||||
// A flow is kept both per-conn and per-stream.
|
||||
n int32
|
||||
|
||||
// conn points to the shared connection-level flow that is
|
||||
// shared by all streams on that conn. It is nil for the flow
|
||||
// that's on the conn directly.
|
||||
conn *flow
|
||||
}
|
||||
|
||||
func (f *flow) setConnFlow(cf *flow) { f.conn = cf }
|
||||
|
||||
func (f *flow) available() int32 {
|
||||
n := f.n
|
||||
if f.conn != nil && f.conn.n < n {
|
||||
n = f.conn.n
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (f *flow) take(n int32) {
|
||||
if n > f.available() {
|
||||
panic("internal error: took too much")
|
||||
}
|
||||
f.n -= n
|
||||
if f.conn != nil {
|
||||
f.conn.n -= n
|
||||
}
|
||||
}
|
||||
|
||||
// add adds n bytes (positive or negative) to the flow control window.
|
||||
// It returns false if the sum would exceed 2^31-1.
|
||||
func (f *flow) add(n int32) bool {
|
||||
sum := f.n + n
|
||||
if (sum > n) == (f.n > 0) {
|
||||
f.n = sum
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
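
The test (sum > n) == (f.n > 0) is a compact int32 overflow check: when no wraparound occurs, the sum exceeds n exactly when the starting value was positive, so any disagreement signals overflow. A standalone sketch of the same check (addNoOverflow is an illustrative name):

package main

import (
    "fmt"
    "math"
)

// addNoOverflow mirrors flow.add: the int32 sum is kept only when it
// did not wrap around.
func addNoOverflow(cur, n int32) (int32, bool) {
    sum := cur + n
    if (sum > n) == (cur > 0) {
        return sum, true
    }
    return cur, false
}

func main() {
    fmt.Println(addNoOverflow(100, 200))         // 300 true
    fmt.Println(addNoOverflow(math.MaxInt32, 1)) // 2147483647 false (would wrap)
    fmt.Println(addNoOverflow(-50, 25))          // -25 true
}
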
|
File diff suppressed because it is too large
@ -0,0 +1,29 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.11
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"net/http/httptrace"
|
||||
"net/textproto"
|
||||
)
|
||||
|
||||
func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
|
||||
return trace != nil && trace.WroteHeaderField != nil
|
||||
}
|
||||
|
||||
func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
|
||||
if trace != nil && trace.WroteHeaderField != nil {
|
||||
trace.WroteHeaderField(k, []string{v})
|
||||
}
|
||||
}
|
||||
|
||||
func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
|
||||
if trace != nil {
|
||||
return trace.Got1xxResponse
|
||||
}
|
||||
return nil
|
||||
}
|
@ -0,0 +1,170 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Defensive debug-only utility to track that functions run on the
|
||||
// goroutine that they're supposed to.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
|
||||
|
||||
type goroutineLock uint64
|
||||
|
||||
func newGoroutineLock() goroutineLock {
|
||||
if !DebugGoroutines {
|
||||
return 0
|
||||
}
|
||||
return goroutineLock(curGoroutineID())
|
||||
}
|
||||
|
||||
func (g goroutineLock) check() {
|
||||
if !DebugGoroutines {
|
||||
return
|
||||
}
|
||||
if curGoroutineID() != uint64(g) {
|
||||
panic("running on the wrong goroutine")
|
||||
}
|
||||
}
|
||||
|
||||
func (g goroutineLock) checkNotOn() {
|
||||
if !DebugGoroutines {
|
||||
return
|
||||
}
|
||||
if curGoroutineID() == uint64(g) {
|
||||
panic("running on the wrong goroutine")
|
||||
}
|
||||
}
|
||||
|
||||
var goroutineSpace = []byte("goroutine ")
|
||||
|
||||
func curGoroutineID() uint64 {
|
||||
bp := littleBuf.Get().(*[]byte)
|
||||
defer littleBuf.Put(bp)
|
||||
b := *bp
|
||||
b = b[:runtime.Stack(b, false)]
|
||||
// Parse the 4707 out of "goroutine 4707 ["
|
||||
b = bytes.TrimPrefix(b, goroutineSpace)
|
||||
i := bytes.IndexByte(b, ' ')
|
||||
if i < 0 {
|
||||
panic(fmt.Sprintf("No space found in %q", b))
|
||||
}
|
||||
b = b[:i]
|
||||
n, err := parseUintBytes(b, 10, 64)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
var littleBuf = sync.Pool{
|
||||
New: func() interface{} {
|
||||
buf := make([]byte, 64)
|
||||
return &buf
|
||||
},
|
||||
}
|
||||
|
||||
// parseUintBytes is like strconv.ParseUint, but using a []byte.
|
||||
func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
|
||||
var cutoff, maxVal uint64
|
||||
|
||||
if bitSize == 0 {
|
||||
bitSize = int(strconv.IntSize)
|
||||
}
|
||||
|
||||
s0 := s
|
||||
switch {
|
||||
case len(s) < 1:
|
||||
err = strconv.ErrSyntax
|
||||
goto Error
|
||||
|
||||
case 2 <= base && base <= 36:
|
||||
// valid base; nothing to do
|
||||
|
||||
case base == 0:
|
||||
// Look for octal, hex prefix.
|
||||
switch {
|
||||
case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
|
||||
base = 16
|
||||
s = s[2:]
|
||||
if len(s) < 1 {
|
||||
err = strconv.ErrSyntax
|
||||
goto Error
|
||||
}
|
||||
case s[0] == '0':
|
||||
base = 8
|
||||
default:
|
||||
base = 10
|
||||
}
|
||||
|
||||
default:
|
||||
err = errors.New("invalid base " + strconv.Itoa(base))
|
||||
goto Error
|
||||
}
|
||||
|
||||
n = 0
|
||||
cutoff = cutoff64(base)
|
||||
maxVal = 1<<uint(bitSize) - 1
|
||||
|
||||
for i := 0; i < len(s); i++ {
|
||||
var v byte
|
||||
d := s[i]
|
||||
switch {
|
||||
case '0' <= d && d <= '9':
|
||||
v = d - '0'
|
||||
case 'a' <= d && d <= 'z':
|
||||
v = d - 'a' + 10
|
||||
case 'A' <= d && d <= 'Z':
|
||||
v = d - 'A' + 10
|
||||
default:
|
||||
n = 0
|
||||
err = strconv.ErrSyntax
|
||||
goto Error
|
||||
}
|
||||
if int(v) >= base {
|
||||
n = 0
|
||||
err = strconv.ErrSyntax
|
||||
goto Error
|
||||
}
|
||||
|
||||
if n >= cutoff {
|
||||
// n*base overflows
|
||||
n = 1<<64 - 1
|
||||
err = strconv.ErrRange
|
||||
goto Error
|
||||
}
|
||||
n *= uint64(base)
|
||||
|
||||
n1 := n + uint64(v)
|
||||
if n1 < n || n1 > maxVal {
|
||||
// n+v overflows
|
||||
n = 1<<64 - 1
|
||||
err = strconv.ErrRange
|
||||
goto Error
|
||||
}
|
||||
n = n1
|
||||
}
|
||||
|
||||
return n, nil
|
||||
|
||||
Error:
|
||||
return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
|
||||
}
|
||||
|
||||
// Return the first number n such that n*base >= 1<<64.
|
||||
func cutoff64(base int) uint64 {
|
||||
if base < 2 {
|
||||
return 0
|
||||
}
|
||||
return (1<<64-1)/uint64(base) + 1
|
||||
}
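
The same goroutine-ID extraction can be restated with the standard library alone, at the cost of the allocations parseUintBytes avoids; goroutineID below is an illustrative name.

package main

import (
    "bytes"
    "fmt"
    "runtime"
    "strconv"
)

// goroutineID parses the "goroutine 4707 [" header that runtime.Stack
// produces, just as curGoroutineID does, but via strconv.ParseUint.
func goroutineID() uint64 {
    b := make([]byte, 64)
    b = b[:runtime.Stack(b, false)]
    b = bytes.TrimPrefix(b, []byte("goroutine "))
    b = b[:bytes.IndexByte(b, ' ')]
    n, err := strconv.ParseUint(string(b), 10, 64)
    if err != nil {
        panic(err)
    }
    return n
}

func main() {
    fmt.Println("running on goroutine", goroutineID())
}
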
|
@ -0,0 +1,88 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
commonBuildOnce sync.Once
|
||||
commonLowerHeader map[string]string // Go-Canonical-Case -> lower-case
|
||||
commonCanonHeader map[string]string // lower-case -> Go-Canonical-Case
|
||||
)
|
||||
|
||||
func buildCommonHeaderMapsOnce() {
|
||||
commonBuildOnce.Do(buildCommonHeaderMaps)
|
||||
}
|
||||
|
||||
func buildCommonHeaderMaps() {
|
||||
common := []string{
|
||||
"accept",
|
||||
"accept-charset",
|
||||
"accept-encoding",
|
||||
"accept-language",
|
||||
"accept-ranges",
|
||||
"age",
|
||||
"access-control-allow-origin",
|
||||
"allow",
|
||||
"authorization",
|
||||
"cache-control",
|
||||
"content-disposition",
|
||||
"content-encoding",
|
||||
"content-language",
|
||||
"content-length",
|
||||
"content-location",
|
||||
"content-range",
|
||||
"content-type",
|
||||
"cookie",
|
||||
"date",
|
||||
"etag",
|
||||
"expect",
|
||||
"expires",
|
||||
"from",
|
||||
"host",
|
||||
"if-match",
|
||||
"if-modified-since",
|
||||
"if-none-match",
|
||||
"if-unmodified-since",
|
||||
"last-modified",
|
||||
"link",
|
||||
"location",
|
||||
"max-forwards",
|
||||
"proxy-authenticate",
|
||||
"proxy-authorization",
|
||||
"range",
|
||||
"referer",
|
||||
"refresh",
|
||||
"retry-after",
|
||||
"server",
|
||||
"set-cookie",
|
||||
"strict-transport-security",
|
||||
"trailer",
|
||||
"transfer-encoding",
|
||||
"user-agent",
|
||||
"vary",
|
||||
"via",
|
||||
"www-authenticate",
|
||||
}
|
||||
commonLowerHeader = make(map[string]string, len(common))
|
||||
commonCanonHeader = make(map[string]string, len(common))
|
||||
for _, v := range common {
|
||||
chk := http.CanonicalHeaderKey(v)
|
||||
commonLowerHeader[chk] = v
|
||||
commonCanonHeader[v] = chk
|
||||
}
|
||||
}
|
||||
|
||||
func lowerHeader(v string) string {
|
||||
buildCommonHeaderMapsOnce()
|
||||
if s, ok := commonLowerHeader[v]; ok {
|
||||
return s
|
||||
}
|
||||
return strings.ToLower(v)
|
||||
}
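
The pattern here, a lookup table built once under sync.Once with a strings.ToLower fallback, can be sketched standalone; the names and the truncated header list below are illustrative only.

package main

import (
    "fmt"
    "net/http"
    "strings"
    "sync"
)

var (
    buildOnce    sync.Once
    lowerByCanon map[string]string
)

// lower mirrors lowerHeader: common canonical keys hit a precomputed table,
// everything else falls back to strings.ToLower.
func lower(v string) string {
    buildOnce.Do(func() {
        common := []string{"content-type", "user-agent", "accept-encoding"}
        lowerByCanon = make(map[string]string, len(common))
        for _, k := range common {
            lowerByCanon[http.CanonicalHeaderKey(k)] = k
        }
    })
    if s, ok := lowerByCanon[v]; ok {
        return s
    }
    return strings.ToLower(v)
}

func main() {
    fmt.Println(lower("Content-Type"))   // content-type (table hit)
    fmt.Println(lower("X-Custom-Thing")) // x-custom-thing (fallback)
}
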
|
@ -0,0 +1,240 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package hpack
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
const (
|
||||
uint32Max = ^uint32(0)
|
||||
initialHeaderTableSize = 4096
|
||||
)
|
||||
|
||||
type Encoder struct {
|
||||
dynTab dynamicTable
|
||||
// minSize is the minimum table size set by
|
||||
// SetMaxDynamicTableSize after the previous Header Table Size
|
||||
// Update.
|
||||
minSize uint32
|
||||
// maxSizeLimit is the maximum table size this encoder
|
||||
// supports. This will protect the encoder from too large
|
||||
// size.
|
||||
maxSizeLimit uint32
|
||||
// tableSizeUpdate indicates whether "Header Table Size
|
||||
// Update" is required.
|
||||
tableSizeUpdate bool
|
||||
w io.Writer
|
||||
buf []byte
|
||||
}
|
||||
|
||||
// NewEncoder returns a new Encoder which performs HPACK encoding.
// Encoded data is written to w.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
e := &Encoder{
|
||||
minSize: uint32Max,
|
||||
maxSizeLimit: initialHeaderTableSize,
|
||||
tableSizeUpdate: false,
|
||||
w: w,
|
||||
}
|
||||
e.dynTab.table.init()
|
||||
e.dynTab.setMaxSize(initialHeaderTableSize)
|
||||
return e
|
||||
}
|
||||
|
||||
// WriteField encodes f into a single Write to e's underlying Writer.
|
||||
// This function may also produce bytes for "Header Table Size Update"
|
||||
// if necessary. If produced, it is done before encoding f.
|
||||
func (e *Encoder) WriteField(f HeaderField) error {
|
||||
e.buf = e.buf[:0]
|
||||
|
||||
if e.tableSizeUpdate {
|
||||
e.tableSizeUpdate = false
|
||||
if e.minSize < e.dynTab.maxSize {
|
||||
e.buf = appendTableSize(e.buf, e.minSize)
|
||||
}
|
||||
e.minSize = uint32Max
|
||||
e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
|
||||
}
|
||||
|
||||
idx, nameValueMatch := e.searchTable(f)
|
||||
if nameValueMatch {
|
||||
e.buf = appendIndexed(e.buf, idx)
|
||||
} else {
|
||||
indexing := e.shouldIndex(f)
|
||||
if indexing {
|
||||
e.dynTab.add(f)
|
||||
}
|
||||
|
||||
if idx == 0 {
|
||||
e.buf = appendNewName(e.buf, f, indexing)
|
||||
} else {
|
||||
e.buf = appendIndexedName(e.buf, f, idx, indexing)
|
||||
}
|
||||
}
|
||||
n, err := e.w.Write(e.buf)
|
||||
if err == nil && n != len(e.buf) {
|
||||
err = io.ErrShortWrite
|
||||
}
|
||||
return err
|
||||
}
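
A short usage sketch of the exported encoder API: each WriteField appends one field's encoding to the destination writer, and a static-table hit such as ":method: GET" (entry 2 in RFC 7541 Appendix A) collapses to the single indexed octet 0x82.

package main

import (
    "bytes"
    "fmt"

    "golang.org/x/net/http2/hpack"
)

func main() {
    var buf bytes.Buffer
    enc := hpack.NewEncoder(&buf)

    // Indexed representation: one byte for a full static-table match.
    _ = enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
    // Sensitive fields use the "Never Indexed" literal representation.
    _ = enc.WriteField(hpack.HeaderField{Name: "authorization", Value: "secret", Sensitive: true})

    fmt.Printf("% x\n", buf.Bytes())
}
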
|
||||
|
||||
// searchTable searches f in both the static and dynamic header tables.
|
||||
// The static header table is searched first. Only when there is no
|
||||
// exact match for both name and value, the dynamic header table is
|
||||
// then searched. If there is no match, i is 0. If both name and value
|
||||
// match, i is the matched index and nameValueMatch becomes true. If
|
||||
// only name matches, i points to that index and nameValueMatch
|
||||
// becomes false.
|
||||
func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
|
||||
i, nameValueMatch = staticTable.search(f)
|
||||
if nameValueMatch {
|
||||
return i, true
|
||||
}
|
||||
|
||||
j, nameValueMatch := e.dynTab.table.search(f)
|
||||
if nameValueMatch || (i == 0 && j != 0) {
|
||||
return j + uint64(staticTable.len()), nameValueMatch
|
||||
}
|
||||
|
||||
return i, false
|
||||
}
|
||||
|
||||
// SetMaxDynamicTableSize changes the dynamic header table size to v.
|
||||
// The actual size is bounded by the value passed to
|
||||
// SetMaxDynamicTableSizeLimit.
|
||||
func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
|
||||
if v > e.maxSizeLimit {
|
||||
v = e.maxSizeLimit
|
||||
}
|
||||
if v < e.minSize {
|
||||
e.minSize = v
|
||||
}
|
||||
e.tableSizeUpdate = true
|
||||
e.dynTab.setMaxSize(v)
|
||||
}
|
||||
|
||||
// SetMaxDynamicTableSizeLimit changes the maximum value that can be
|
||||
// specified in SetMaxDynamicTableSize to v. By default, it is set to
|
||||
// 4096, which is the same as the default dynamic header table size
// described in the HPACK specification. If the current maximum
|
||||
// dynamic header table size is strictly greater than v, "Header Table
|
||||
// Size Update" will be done in the next WriteField call and the
|
||||
// maximum dynamic header table size is truncated to v.
|
||||
func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
|
||||
e.maxSizeLimit = v
|
||||
if e.dynTab.maxSize > v {
|
||||
e.tableSizeUpdate = true
|
||||
e.dynTab.setMaxSize(v)
|
||||
}
|
||||
}
|
||||
|
||||
// shouldIndex reports whether f should be indexed.
|
||||
func (e *Encoder) shouldIndex(f HeaderField) bool {
|
||||
return !f.Sensitive && f.Size() <= e.dynTab.maxSize
|
||||
}
|
||||
|
||||
// appendIndexed appends index i, as encoded in "Indexed Header Field"
|
||||
// representation, to dst and returns the extended buffer.
|
||||
func appendIndexed(dst []byte, i uint64) []byte {
|
||||
first := len(dst)
|
||||
dst = appendVarInt(dst, 7, i)
|
||||
dst[first] |= 0x80
|
||||
return dst
|
||||
}
|
||||
|
||||
// appendNewName appends f, as encoded in one of "Literal Header field
|
||||
// - New Name" representation variants, to dst and returns the
|
||||
// extended buffer.
|
||||
//
|
||||
// If f.Sensitive is true, "Never Indexed" representation is used. If
|
||||
// f.Sensitive is false and indexing is true, "Inremental Indexing"
|
||||
// representation is used.
|
||||
func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
|
||||
dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
|
||||
dst = appendHpackString(dst, f.Name)
|
||||
return appendHpackString(dst, f.Value)
|
||||
}
|
||||
|
||||
// appendIndexedName appends f and index i referring indexed name
|
||||
// entry, as encoded in one of "Literal Header field - Indexed Name"
|
||||
// representation variants, to dst and returns the extended buffer.
|
||||
//
|
||||
// If f.Sensitive is true, "Never Indexed" representation is used. If
|
||||
// f.Sensitive is false and indexing is true, "Incremental Indexing"
|
||||
// representation is used.
|
||||
func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
|
||||
first := len(dst)
|
||||
var n byte
|
||||
if indexing {
|
||||
n = 6
|
||||
} else {
|
||||
n = 4
|
||||
}
|
||||
dst = appendVarInt(dst, n, i)
|
||||
dst[first] |= encodeTypeByte(indexing, f.Sensitive)
|
||||
return appendHpackString(dst, f.Value)
|
||||
}
|
||||
|
||||
// appendTableSize appends v, as encoded in "Header Table Size Update"
|
||||
// representation, to dst and returns the extended buffer.
|
||||
func appendTableSize(dst []byte, v uint32) []byte {
|
||||
first := len(dst)
|
||||
dst = appendVarInt(dst, 5, uint64(v))
|
||||
dst[first] |= 0x20
|
||||
return dst
|
||||
}
|
||||
|
||||
// appendVarInt appends i, as encoded in variable integer form using n
|
||||
// bit prefix, to dst and returns the extended buffer.
|
||||
//
|
||||
// See
|
||||
// http://http2.github.io/http2-spec/compression.html#integer.representation
|
||||
func appendVarInt(dst []byte, n byte, i uint64) []byte {
|
||||
k := uint64((1 << n) - 1)
|
||||
if i < k {
|
||||
return append(dst, byte(i))
|
||||
}
|
||||
dst = append(dst, byte(k))
|
||||
i -= k
|
||||
for ; i >= 128; i >>= 7 {
|
||||
dst = append(dst, byte(0x80|(i&0x7f)))
|
||||
}
|
||||
return append(dst, byte(i))
|
||||
}
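
As a concrete check of the prefix encoding above, RFC 7541 Appendix C.1.2 encodes the value 1337 with a 5-bit prefix as the octets 1f 9a 0a; a hypothetical in-package test (appendVarInt is unexported) exercising exactly that case:

package hpack

import (
    "bytes"
    "testing"
)

func TestAppendVarIntSketch(t *testing.T) {
    // 1337 saturates the 5-bit prefix at 31 (0x1f); the remainder 1306 is
    // emitted in 7-bit groups as 0x9a and then 0x0a.
    got := appendVarInt(nil, 5, 1337)
    want := []byte{0x1f, 0x9a, 0x0a}
    if !bytes.Equal(got, want) {
        t.Fatalf("got % x, want % x", got, want)
    }
}
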
|
||||
|
||||
// appendHpackString appends s, as encoded in "String Literal"
|
||||
// representation, to dst and returns the extended buffer.
|
||||
//
|
||||
// s will be encoded in Huffman codes only when it produces a strictly
// shorter byte string.
|
||||
func appendHpackString(dst []byte, s string) []byte {
|
||||
huffmanLength := HuffmanEncodeLength(s)
|
||||
if huffmanLength < uint64(len(s)) {
|
||||
first := len(dst)
|
||||
dst = appendVarInt(dst, 7, huffmanLength)
|
||||
dst = AppendHuffmanString(dst, s)
|
||||
dst[first] |= 0x80
|
||||
} else {
|
||||
dst = appendVarInt(dst, 7, uint64(len(s)))
|
||||
dst = append(dst, s...)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// encodeTypeByte returns the type byte. If sensitive is true, the type byte
|
||||
// for "Never Indexed" representation is returned. If sensitive is
|
||||
// false and indexing is true, type byte for "Incremental Indexing"
|
||||
// representation is returned. Otherwise, type byte for "Without
|
||||
// Indexing" is returned.
|
||||
func encodeTypeByte(indexing, sensitive bool) byte {
|
||||
if sensitive {
|
||||
return 0x10
|
||||
}
|
||||
if indexing {
|
||||
return 0x40
|
||||
}
|
||||
return 0
|
||||
}
|
@ -0,0 +1,504 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package hpack implements HPACK, a compression format for
|
||||
// efficiently representing HTTP header fields in the context of HTTP/2.
|
||||
//
|
||||
// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09
|
||||
package hpack
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// A DecodingError is something the spec defines as a decoding error.
|
||||
type DecodingError struct {
|
||||
Err error
|
||||
}
|
||||
|
||||
func (de DecodingError) Error() string {
|
||||
return fmt.Sprintf("decoding error: %v", de.Err)
|
||||
}
|
||||
|
||||
// An InvalidIndexError is returned when an encoder references a table
|
||||
// entry before the static table or after the end of the dynamic table.
|
||||
type InvalidIndexError int
|
||||
|
||||
func (e InvalidIndexError) Error() string {
|
||||
return fmt.Sprintf("invalid indexed representation index %d", int(e))
|
||||
}
|
||||
|
||||
// A HeaderField is a name-value pair. Both the name and value are
|
||||
// treated as opaque sequences of octets.
|
||||
type HeaderField struct {
|
||||
Name, Value string
|
||||
|
||||
// Sensitive means that this header field should never be
|
||||
// indexed.
|
||||
Sensitive bool
|
||||
}
|
||||
|
||||
// IsPseudo reports whether the header field is an http2 pseudo header.
|
||||
// That is, it reports whether it starts with a colon.
|
||||
// It is not otherwise guaranteed to be a valid pseudo header field,
|
||||
// though.
|
||||
func (hf HeaderField) IsPseudo() bool {
|
||||
return len(hf.Name) != 0 && hf.Name[0] == ':'
|
||||
}
|
||||
|
||||
func (hf HeaderField) String() string {
|
||||
var suffix string
|
||||
if hf.Sensitive {
|
||||
suffix = " (sensitive)"
|
||||
}
|
||||
return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix)
|
||||
}
|
||||
|
||||
// Size returns the size of an entry per RFC 7541 section 4.1.
|
||||
func (hf HeaderField) Size() uint32 {
|
||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.4.1
|
||||
// "The size of the dynamic table is the sum of the size of
|
||||
// its entries. The size of an entry is the sum of its name's
|
||||
// length in octets (as defined in Section 5.2), its value's
|
||||
// length in octets (see Section 5.2), plus 32. The size of
|
||||
// an entry is calculated using the length of the name and
|
||||
// value without any Huffman encoding applied."
|
||||
|
||||
// This can overflow if somebody makes a large HeaderField
|
||||
// Name and/or Value by hand, but we don't care, because that
|
||||
// won't happen on the wire because the encoding doesn't allow
|
||||
// it.
|
||||
return uint32(len(hf.Name) + len(hf.Value) + 32)
|
||||
}
|
||||
|
||||
// A Decoder is the decoding context for incremental processing of
|
||||
// header blocks.
|
||||
type Decoder struct {
|
||||
dynTab dynamicTable
|
||||
emit func(f HeaderField)
|
||||
|
||||
emitEnabled bool // whether calls to emit are enabled
|
||||
maxStrLen int // 0 means unlimited
|
||||
|
||||
// buf is the unparsed buffer. It's only written to
|
||||
// saveBuf if it was truncated in the middle of a header
|
||||
// block. Because it's usually not owned, we can only
|
||||
// process it under Write.
|
||||
buf []byte // not owned; only valid during Write
|
||||
|
||||
// saveBuf is previous data passed to Write which we weren't able
|
||||
// to fully parse before. Unlike buf, we own this data.
|
||||
saveBuf bytes.Buffer
|
||||
|
||||
firstField bool // processing the first field of the header block
|
||||
}
|
||||
|
||||
// NewDecoder returns a new decoder with the provided maximum dynamic
|
||||
// table size. The emitFunc will be called for each valid field
|
||||
// parsed, in the same goroutine as calls to Write, before Write returns.
|
||||
func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder {
|
||||
d := &Decoder{
|
||||
emit: emitFunc,
|
||||
emitEnabled: true,
|
||||
firstField: true,
|
||||
}
|
||||
d.dynTab.table.init()
|
||||
d.dynTab.allowedMaxSize = maxDynamicTableSize
|
||||
d.dynTab.setMaxSize(maxDynamicTableSize)
|
||||
return d
|
||||
}
|
||||
|
||||
// ErrStringLength is returned by Decoder.Write when the max string length
|
||||
// (as configured by Decoder.SetMaxStringLength) would be violated.
|
||||
var ErrStringLength = errors.New("hpack: string too long")
|
||||
|
||||
// SetMaxStringLength sets the maximum size of a HeaderField name or
|
||||
// value string. If a string exceeds this length (even after any
|
||||
// decompression), Write will return ErrStringLength.
|
||||
// A value of 0 means unlimited and is the default from NewDecoder.
|
||||
func (d *Decoder) SetMaxStringLength(n int) {
|
||||
d.maxStrLen = n
|
||||
}
|
||||
|
||||
// SetEmitFunc changes the callback used when new header fields
|
||||
// are decoded.
|
||||
// It must be non-nil. It does not affect EmitEnabled.
|
||||
func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) {
|
||||
d.emit = emitFunc
|
||||
}
|
||||
|
||||
// SetEmitEnabled controls whether the emitFunc provided to NewDecoder
|
||||
// should be called. The default is true.
|
||||
//
|
||||
// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE
|
||||
// while still decoding and keeping in-sync with decoder state, but
|
||||
// without doing unnecessary decompression or generating unnecessary
|
||||
// garbage for header fields past the limit.
|
||||
func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v }
|
||||
|
||||
// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder
|
||||
// are currently enabled. The default is true.
|
||||
func (d *Decoder) EmitEnabled() bool { return d.emitEnabled }
|
||||
|
||||
// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their
|
||||
// underlying buffers for garbage reasons.
|
||||
|
||||
func (d *Decoder) SetMaxDynamicTableSize(v uint32) {
|
||||
d.dynTab.setMaxSize(v)
|
||||
}
|
||||
|
||||
// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded
|
||||
// stream (via dynamic table size updates) may set the maximum size
|
||||
// to.
|
||||
func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
|
||||
d.dynTab.allowedMaxSize = v
|
||||
}
|
||||
|
||||
type dynamicTable struct {
|
||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
|
||||
table headerFieldTable
|
||||
size uint32 // in bytes
|
||||
maxSize uint32 // current maxSize
|
||||
allowedMaxSize uint32 // maxSize may go up to this, inclusive
|
||||
}
|
||||
|
||||
func (dt *dynamicTable) setMaxSize(v uint32) {
|
||||
dt.maxSize = v
|
||||
dt.evict()
|
||||
}
|
||||
|
||||
func (dt *dynamicTable) add(f HeaderField) {
|
||||
dt.table.addEntry(f)
|
||||
dt.size += f.Size()
|
||||
dt.evict()
|
||||
}
|
||||
|
||||
// If we're too big, evict old stuff.
|
||||
func (dt *dynamicTable) evict() {
|
||||
var n int
|
||||
for dt.size > dt.maxSize && n < dt.table.len() {
|
||||
dt.size -= dt.table.ents[n].Size()
|
||||
n++
|
||||
}
|
||||
dt.table.evictOldest(n)
|
||||
}
|
||||
|
||||
func (d *Decoder) maxTableIndex() int {
|
||||
// This should never overflow. RFC 7540 Section 6.5.2 limits the size of
|
||||
// the dynamic table to 2^32 bytes, where each entry will occupy more than
|
||||
// one byte. Further, the staticTable has a fixed, small length.
|
||||
return d.dynTab.table.len() + staticTable.len()
|
||||
}
|
||||
|
||||
func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
|
||||
// See Section 2.3.3.
|
||||
if i == 0 {
|
||||
return
|
||||
}
|
||||
if i <= uint64(staticTable.len()) {
|
||||
return staticTable.ents[i-1], true
|
||||
}
|
||||
if i > uint64(d.maxTableIndex()) {
|
||||
return
|
||||
}
|
||||
// In the dynamic table, newer entries have lower indices.
|
||||
// However, dt.ents[0] is the oldest entry. Hence, dt.ents is
|
||||
// the reversed dynamic table.
|
||||
dt := d.dynTab.table
|
||||
return dt.ents[dt.len()-(int(i)-staticTable.len())], true
|
||||
}
|
||||
|
||||
// Decode decodes an entire block.
|
||||
//
|
||||
// TODO: remove this method and make it incremental later? This is
|
||||
// easier for debugging now.
|
||||
func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {
|
||||
var hf []HeaderField
|
||||
saveFunc := d.emit
|
||||
defer func() { d.emit = saveFunc }()
|
||||
d.emit = func(f HeaderField) { hf = append(hf, f) }
|
||||
if _, err := d.Write(p); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := d.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return hf, nil
|
||||
}
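
A round-trip sketch using the exported API: encode a couple of fields with the Encoder defined earlier in this diff, then recover them with DecodeFull (import path as vendored here; the no-op emit func is only a placeholder, since DecodeFull installs its own collector).

package main

import (
    "bytes"
    "fmt"

    "golang.org/x/net/http2/hpack"
)

func main() {
    var buf bytes.Buffer
    enc := hpack.NewEncoder(&buf)
    _ = enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
    _ = enc.WriteField(hpack.HeaderField{Name: "content-type", Value: "text/plain"})

    dec := hpack.NewDecoder(4096, func(hpack.HeaderField) {})
    fields, err := dec.DecodeFull(buf.Bytes())
    if err != nil {
        panic(err)
    }
    for _, f := range fields {
        fmt.Printf("%s: %s\n", f.Name, f.Value) // :status: 200, then content-type: text/plain
    }
}
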
|
||||
|
||||
// Close declares that the decoding is complete and resets the Decoder
|
||||
// to be reused again for a new header block. If there is any remaining
|
||||
// data in the decoder's buffer, Close returns an error.
|
||||
func (d *Decoder) Close() error {
|
||||
if d.saveBuf.Len() > 0 {
|
||||
d.saveBuf.Reset()
|
||||
return DecodingError{errors.New("truncated headers")}
|
||||
}
|
||||
d.firstField = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) Write(p []byte) (n int, err error) {
|
||||
if len(p) == 0 {
|
||||
// Prevent state machine CPU attacks (making us redo
|
||||
// work up to the point of finding out we don't have
|
||||
// enough data)
|
||||
return
|
||||
}
|
||||
// Only copy the data if we have to. Optimistically assume
|
||||
// that p will contain a complete header block.
|
||||
if d.saveBuf.Len() == 0 {
|
||||
d.buf = p
|
||||
} else {
|
||||
d.saveBuf.Write(p)
|
||||
d.buf = d.saveBuf.Bytes()
|
||||
d.saveBuf.Reset()
|
||||
}
|
||||
|
||||
for len(d.buf) > 0 {
|
||||
err = d.parseHeaderFieldRepr()
|
||||
if err == errNeedMore {
|
||||
// Extra paranoia, making sure saveBuf won't
|
||||
// get too large. All the varint and string
|
||||
// reading code earlier should already catch
|
||||
// overlong things and return ErrStringLength,
|
||||
// but keep this as a last resort.
|
||||
const varIntOverhead = 8 // conservative
|
||||
if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) {
|
||||
return 0, ErrStringLength
|
||||
}
|
||||
d.saveBuf.Write(d.buf)
|
||||
return len(p), nil
|
||||
}
|
||||
d.firstField = false
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
return len(p), err
|
||||
}
|
||||
|
||||
// errNeedMore is an internal sentinel error value that means the
|
||||
// buffer is truncated and we need to read more data before we can
|
||||
// continue parsing.
|
||||
var errNeedMore = errors.New("need more data")
|
||||
|
||||
type indexType int
|
||||
|
||||
const (
|
||||
indexedTrue indexType = iota
|
||||
indexedFalse
|
||||
indexedNever
|
||||
)
|
||||
|
||||
func (v indexType) indexed() bool { return v == indexedTrue }
|
||||
func (v indexType) sensitive() bool { return v == indexedNever }
|
||||
|
||||
// returns errNeedMore if there isn't enough data available.
|
||||
// any other error is fatal.
|
||||
// consumes d.buf iff it returns nil.
|
||||
// precondition: must be called with len(d.buf) > 0
|
||||
func (d *Decoder) parseHeaderFieldRepr() error {
|
||||
b := d.buf[0]
|
||||
switch {
|
||||
case b&128 != 0:
|
||||
// Indexed representation.
|
||||
// High bit set?
|
||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.6.1
|
||||
return d.parseFieldIndexed()
|
||||
case b&192 == 64:
|
||||
// 6.2.1 Literal Header Field with Incremental Indexing
|
||||
// 0b01xxxxxx: top two bits are 01
|
||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1
|
||||
return d.parseFieldLiteral(6, indexedTrue)
|
||||
case b&240 == 0:
|
||||
// 6.2.2 Literal Header Field without Indexing
|
||||
// 0b0000xxxx: top four bits are 0000
|
||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2
|
||||
return d.parseFieldLiteral(4, indexedFalse)
|
||||
case b&240 == 16:
|
||||
// 6.2.3 Literal Header Field never Indexed
|
||||
// 0b0001xxxx: top four bits are 0001
|
||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3
|
||||
return d.parseFieldLiteral(4, indexedNever)
|
||||
case b&224 == 32:
|
||||
// 6.3 Dynamic Table Size Update
|
||||
// Top three bits are '001'.
|
||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.6.3
|
||||
return d.parseDynamicTableSizeUpdate()
|
||||
}
|
||||
|
||||
return DecodingError{errors.New("invalid encoding")}
|
||||
}
|
||||
|
||||
// (same invariants and behavior as parseHeaderFieldRepr)
|
||||
func (d *Decoder) parseFieldIndexed() error {
|
||||
buf := d.buf
|
||||
idx, buf, err := readVarInt(7, buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hf, ok := d.at(idx)
|
||||
if !ok {
|
||||
return DecodingError{InvalidIndexError(idx)}
|
||||
}
|
||||
d.buf = buf
|
||||
return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
|
||||
}
|
||||
|
||||
// (same invariants and behavior as parseHeaderFieldRepr)
|
||||
func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
|
||||
buf := d.buf
|
||||
nameIdx, buf, err := readVarInt(n, buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var hf HeaderField
|
||||
wantStr := d.emitEnabled || it.indexed()
|
||||
if nameIdx > 0 {
|
||||
ihf, ok := d.at(nameIdx)
|
||||
if !ok {
|
||||
return DecodingError{InvalidIndexError(nameIdx)}
|
||||
}
|
||||
hf.Name = ihf.Name
|
||||
} else {
|
||||
hf.Name, buf, err = d.readString(buf, wantStr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
hf.Value, buf, err = d.readString(buf, wantStr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.buf = buf
|
||||
if it.indexed() {
|
||||
d.dynTab.add(hf)
|
||||
}
|
||||
hf.Sensitive = it.sensitive()
|
||||
return d.callEmit(hf)
|
||||
}
|
||||
|
||||
func (d *Decoder) callEmit(hf HeaderField) error {
|
||||
if d.maxStrLen != 0 {
|
||||
if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {
|
||||
return ErrStringLength
|
||||
}
|
||||
}
|
||||
if d.emitEnabled {
|
||||
d.emit(hf)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// (same invariants and behavior as parseHeaderFieldRepr)
|
||||
func (d *Decoder) parseDynamicTableSizeUpdate() error {
|
||||
// RFC 7541, sec 4.2: This dynamic table size update MUST occur at the
|
||||
// beginning of the first header block following the change to the dynamic table size.
|
||||
if !d.firstField && d.dynTab.size > 0 {
|
||||
return DecodingError{errors.New("dynamic table size update MUST occur at the beginning of a header block")}
|
||||
}
|
||||
|
||||
buf := d.buf
|
||||
size, buf, err := readVarInt(5, buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if size > uint64(d.dynTab.allowedMaxSize) {
|
||||
return DecodingError{errors.New("dynamic table size update too large")}
|
||||
}
|
||||
d.dynTab.setMaxSize(uint32(size))
|
||||
d.buf = buf
|
||||
return nil
|
||||
}
|
||||
|
||||
var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}
|
||||
|
||||
// readVarInt reads an unsigned variable length integer off the
|
||||
// beginning of p. n is the parameter as described in
|
||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.
|
||||
//
|
||||
// n must always be between 1 and 8.
|
||||
//
|
||||
// On success, the returned remain buffer is a smaller suffix of p; otherwise err != nil.
|
||||
// The error is errNeedMore if p doesn't contain a complete integer.
|
||||
func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
|
||||
if n < 1 || n > 8 {
|
||||
panic("bad n")
|
||||
}
|
||||
if len(p) == 0 {
|
||||
return 0, p, errNeedMore
|
||||
}
|
||||
i = uint64(p[0])
|
||||
if n < 8 {
|
||||
i &= (1 << uint64(n)) - 1
|
||||
}
|
||||
if i < (1<<uint64(n))-1 {
|
||||
return i, p[1:], nil
|
||||
}
|
||||
|
||||
origP := p
|
||||
p = p[1:]
|
||||
var m uint64
|
||||
for len(p) > 0 {
|
||||
b := p[0]
|
||||
p = p[1:]
|
||||
i += uint64(b&127) << m
|
||||
if b&128 == 0 {
|
||||
return i, p, nil
|
||||
}
|
||||
m += 7
|
||||
if m >= 63 { // TODO: proper overflow check. making this up.
|
||||
return 0, origP, errVarintOverflow
|
||||
}
|
||||
}
|
||||
return 0, origP, errNeedMore
|
||||
}
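To make the prefix coding concrete, here is a simplified, hypothetical mirror of readVarInt (overflow and truncation handling omitted), together with the RFC 7541 appendix C.1.2 example of 1337 encoded with a 5-bit prefix:

// decodeVarIntSketch is an illustrative stand-in for readVarInt, not part
// of this package; it assumes p holds a complete, non-overflowing integer.
func decodeVarIntSketch(n byte, p []byte) uint64 {
	i := uint64(p[0]) & (1<<n - 1) // low n bits of the first byte
	if i < 1<<n-1 {
		return i // value fit entirely in the prefix
	}
	var m uint
	for _, b := range p[1:] {
		i += uint64(b&127) << m // 7 payload bits per continuation byte
		if b&128 == 0 {
			break
		}
		m += 7
	}
	return i
}

// decodeVarIntSketch(5, []byte{0x1f, 0x9a, 0x0a}) == 1337:
// the prefix contributes 31, then 26<<0 and 10<<7 add the remaining 1306.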
|
||||
|
||||
// readString decodes an hpack string from p.
|
||||
//
|
||||
// wantStr is whether s will be used. If false, decompression and
|
||||
// []byte->string garbage are skipped if s will be ignored
|
||||
// anyway. This does mean that huffman decoding errors for non-indexed
|
||||
// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
|
||||
// is returning an error anyway, and because they're not indexed, the error
|
||||
// won't affect the decoding state.
|
||||
func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
|
||||
if len(p) == 0 {
|
||||
return "", p, errNeedMore
|
||||
}
|
||||
isHuff := p[0]&128 != 0
|
||||
strLen, p, err := readVarInt(7, p)
|
||||
if err != nil {
|
||||
return "", p, err
|
||||
}
|
||||
if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
|
||||
return "", nil, ErrStringLength
|
||||
}
|
||||
if uint64(len(p)) < strLen {
|
||||
return "", p, errNeedMore
|
||||
}
|
||||
if !isHuff {
|
||||
if wantStr {
|
||||
s = string(p[:strLen])
|
||||
}
|
||||
return s, p[strLen:], nil
|
||||
}
|
||||
|
||||
if wantStr {
|
||||
buf := bufPool.Get().(*bytes.Buffer)
|
||||
buf.Reset() // don't trust others
|
||||
defer bufPool.Put(buf)
|
||||
if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
|
||||
buf.Reset()
|
||||
return "", nil, err
|
||||
}
|
||||
s = buf.String()
|
||||
buf.Reset() // be nice to GC
|
||||
}
|
||||
return s, p[strLen:], nil
|
||||
}
|
@ -0,0 +1,222 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package hpack
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var bufPool = sync.Pool{
|
||||
New: func() interface{} { return new(bytes.Buffer) },
|
||||
}
|
||||
|
||||
// HuffmanDecode decodes the string in v and writes the expanded
|
||||
// result to w, returning the number of bytes written to w and the
|
||||
// Write call's return value. At most one Write call is made.
|
||||
func HuffmanDecode(w io.Writer, v []byte) (int, error) {
|
||||
buf := bufPool.Get().(*bytes.Buffer)
|
||||
buf.Reset()
|
||||
defer bufPool.Put(buf)
|
||||
if err := huffmanDecode(buf, 0, v); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return w.Write(buf.Bytes())
|
||||
}
|
||||
|
||||
// HuffmanDecodeToString decodes the string in v.
|
||||
func HuffmanDecodeToString(v []byte) (string, error) {
|
||||
buf := bufPool.Get().(*bytes.Buffer)
|
||||
buf.Reset()
|
||||
defer bufPool.Put(buf)
|
||||
if err := huffmanDecode(buf, 0, v); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// ErrInvalidHuffman is returned for errors found decoding
|
||||
// Huffman-encoded strings.
|
||||
var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
|
||||
|
||||
// huffmanDecode decodes v to buf.
|
||||
// If maxLen is greater than 0, attempts to write more to buf than
|
||||
// maxLen bytes will return ErrStringLength.
|
||||
func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
|
||||
rootHuffmanNode := getRootHuffmanNode()
|
||||
n := rootHuffmanNode
|
||||
// cur is the bit buffer that has not been fed into n.
|
||||
// cbits is the number of low order bits in cur that are valid.
|
||||
// sbits is the number of bits of the symbol prefix being decoded.
|
||||
cur, cbits, sbits := uint(0), uint8(0), uint8(0)
|
||||
for _, b := range v {
|
||||
cur = cur<<8 | uint(b)
|
||||
cbits += 8
|
||||
sbits += 8
|
||||
for cbits >= 8 {
|
||||
idx := byte(cur >> (cbits - 8))
|
||||
n = n.children[idx]
|
||||
if n == nil {
|
||||
return ErrInvalidHuffman
|
||||
}
|
||||
if n.children == nil {
|
||||
if maxLen != 0 && buf.Len() == maxLen {
|
||||
return ErrStringLength
|
||||
}
|
||||
buf.WriteByte(n.sym)
|
||||
cbits -= n.codeLen
|
||||
n = rootHuffmanNode
|
||||
sbits = cbits
|
||||
} else {
|
||||
cbits -= 8
|
||||
}
|
||||
}
|
||||
}
|
||||
for cbits > 0 {
|
||||
n = n.children[byte(cur<<(8-cbits))]
|
||||
if n == nil {
|
||||
return ErrInvalidHuffman
|
||||
}
|
||||
if n.children != nil || n.codeLen > cbits {
|
||||
break
|
||||
}
|
||||
if maxLen != 0 && buf.Len() == maxLen {
|
||||
return ErrStringLength
|
||||
}
|
||||
buf.WriteByte(n.sym)
|
||||
cbits -= n.codeLen
|
||||
n = rootHuffmanNode
|
||||
sbits = cbits
|
||||
}
|
||||
if sbits > 7 {
|
||||
// Either there was an incomplete symbol, or overlong padding.
|
||||
// Both are decoding errors per RFC 7541 section 5.2.
|
||||
return ErrInvalidHuffman
|
||||
}
|
||||
if mask := uint(1<<cbits - 1); cur&mask != mask {
|
||||
// Trailing bits must be a prefix of EOS per RFC 7541 section 5.2.
|
||||
return ErrInvalidHuffman
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type node struct {
|
||||
// children is non-nil for internal nodes
|
||||
children *[256]*node
|
||||
|
||||
// The following are only valid if children is nil:
|
||||
codeLen uint8 // number of bits that led to the output of sym
|
||||
sym byte // output symbol
|
||||
}
|
||||
|
||||
func newInternalNode() *node {
|
||||
return &node{children: new([256]*node)}
|
||||
}
|
||||
|
||||
var (
|
||||
buildRootOnce sync.Once
|
||||
lazyRootHuffmanNode *node
|
||||
)
|
||||
|
||||
func getRootHuffmanNode() *node {
|
||||
buildRootOnce.Do(buildRootHuffmanNode)
|
||||
return lazyRootHuffmanNode
|
||||
}
|
||||
|
||||
func buildRootHuffmanNode() {
|
||||
if len(huffmanCodes) != 256 {
|
||||
panic("unexpected size")
|
||||
}
|
||||
lazyRootHuffmanNode = newInternalNode()
|
||||
for i, code := range huffmanCodes {
|
||||
addDecoderNode(byte(i), code, huffmanCodeLen[i])
|
||||
}
|
||||
}
|
||||
|
||||
func addDecoderNode(sym byte, code uint32, codeLen uint8) {
|
||||
cur := lazyRootHuffmanNode
|
||||
for codeLen > 8 {
|
||||
codeLen -= 8
|
||||
i := uint8(code >> codeLen)
|
||||
if cur.children[i] == nil {
|
||||
cur.children[i] = newInternalNode()
|
||||
}
|
||||
cur = cur.children[i]
|
||||
}
|
||||
shift := 8 - codeLen
|
||||
start, end := int(uint8(code<<shift)), int(1<<shift)
|
||||
for i := start; i < start+end; i++ {
|
||||
cur.children[i] = &node{sym: sym, codeLen: codeLen}
|
||||
}
|
||||
}
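For example, the 5-bit code 0b00000 for '0' is installed with shift = 3, so it fills children[0x00] through children[0x07] of the root; during decoding, any byte whose top five bits are 00000 then resolves to '0' in a single table lookup, leaving three bits in the bit buffer.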
|
||||
|
||||
// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
|
||||
// and returns the extended buffer.
|
||||
func AppendHuffmanString(dst []byte, s string) []byte {
|
||||
rembits := uint8(8)
|
||||
|
||||
for i := 0; i < len(s); i++ {
|
||||
if rembits == 8 {
|
||||
dst = append(dst, 0)
|
||||
}
|
||||
dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
|
||||
}
|
||||
|
||||
if rembits < 8 {
|
||||
// special EOS symbol
|
||||
code := uint32(0x3fffffff)
|
||||
nbits := uint8(30)
|
||||
|
||||
t := uint8(code >> (nbits - rembits))
|
||||
dst[len(dst)-1] |= t
|
||||
}
|
||||
|
||||
return dst
|
||||
}
|
||||
|
||||
// HuffmanEncodeLength returns the number of bytes required to encode
|
||||
// s in Huffman codes. The result is rounded up to a byte boundary.
|
||||
func HuffmanEncodeLength(s string) uint64 {
|
||||
n := uint64(0)
|
||||
for i := 0; i < len(s); i++ {
|
||||
n += uint64(huffmanCodeLen[s[i]])
|
||||
}
|
||||
return (n + 7) / 8
|
||||
}
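A small round-trip sketch of the exported Huffman helpers above (again assuming the usual golang.org/x/net/http2/hpack import path); the sample string and its 12-byte encoded length come from RFC 7541 appendix C.4.1:

package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	s := "www.example.com"
	fmt.Println(hpack.HuffmanEncodeLength(s)) // 12

	enc := hpack.AppendHuffmanString(nil, s)
	dec, err := hpack.HuffmanDecodeToString(enc)
	if err != nil {
		panic(err)
	}
	fmt.Println(dec == s) // true
}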
|
||||
|
||||
// appendByteToHuffmanCode appends Huffman code for c to dst and
|
||||
// returns the extended buffer and the remaining bits in the last
|
||||
// element. The appending is not byte aligned and the remaining bits
|
||||
// in the last element of dst are given in rembits.
|
||||
func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
|
||||
code := huffmanCodes[c]
|
||||
nbits := huffmanCodeLen[c]
|
||||
|
||||
for {
|
||||
if rembits > nbits {
|
||||
t := uint8(code << (rembits - nbits))
|
||||
dst[len(dst)-1] |= t
|
||||
rembits -= nbits
|
||||
break
|
||||
}
|
||||
|
||||
t := uint8(code >> (nbits - rembits))
|
||||
dst[len(dst)-1] |= t
|
||||
|
||||
nbits -= rembits
|
||||
rembits = 8
|
||||
|
||||
if nbits == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
dst = append(dst, 0)
|
||||
}
|
||||
|
||||
return dst, rembits
|
||||
}
|
@ -0,0 +1,479 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package hpack
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// headerFieldTable implements a list of HeaderFields.
|
||||
// This is used to implement the static and dynamic tables.
|
||||
type headerFieldTable struct {
|
||||
// For static tables, entries are never evicted.
|
||||
//
|
||||
// For dynamic tables, entries are evicted from ents[0] and added to the end.
|
||||
// Each entry has a unique id that starts at one and increments for each
|
||||
// entry that is added. This unique id is stable across evictions, meaning
|
||||
// it can be used as a pointer to a specific entry. As in hpack, unique ids
|
||||
// are 1-based. The unique id for ents[k] is k + evictCount + 1.
|
||||
//
|
||||
// Zero is not a valid unique id.
|
||||
//
|
||||
// evictCount should not overflow in any remotely practical situation. In
|
||||
// practice, we will have one dynamic table per HTTP/2 connection. If we
|
||||
// assume a very powerful server that handles 1M QPS per connection and each
|
||||
// request adds (then evicts) 100 entries from the table, it would still take
|
||||
// 2M years for evictCount to overflow.
|
||||
ents []HeaderField
|
||||
evictCount uint64
|
||||
|
||||
// byName maps a HeaderField name to the unique id of the newest entry with
|
||||
// the same name. See above for a definition of "unique id".
|
||||
byName map[string]uint64
|
||||
|
||||
// byNameValue maps a HeaderField name/value pair to the unique id of the newest
|
||||
// entry with the same name and value. See above for a definition of "unique id".
|
||||
byNameValue map[pairNameValue]uint64
|
||||
}
|
||||
|
||||
type pairNameValue struct {
|
||||
name, value string
|
||||
}
|
||||
|
||||
func (t *headerFieldTable) init() {
|
||||
t.byName = make(map[string]uint64)
|
||||
t.byNameValue = make(map[pairNameValue]uint64)
|
||||
}
|
||||
|
||||
// len reports the number of entries in the table.
|
||||
func (t *headerFieldTable) len() int {
|
||||
return len(t.ents)
|
||||
}
|
||||
|
||||
// addEntry adds a new entry.
|
||||
func (t *headerFieldTable) addEntry(f HeaderField) {
|
||||
id := uint64(t.len()) + t.evictCount + 1
|
||||
t.byName[f.Name] = id
|
||||
t.byNameValue[pairNameValue{f.Name, f.Value}] = id
|
||||
t.ents = append(t.ents, f)
|
||||
}
|
||||
|
||||
// evictOldest evicts the n oldest entries in the table.
|
||||
func (t *headerFieldTable) evictOldest(n int) {
|
||||
if n > t.len() {
|
||||
panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len()))
|
||||
}
|
||||
for k := 0; k < n; k++ {
|
||||
f := t.ents[k]
|
||||
id := t.evictCount + uint64(k) + 1
|
||||
if t.byName[f.Name] == id {
|
||||
delete(t.byName, f.Name)
|
||||
}
|
||||
if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id {
|
||||
delete(t.byNameValue, p)
|
||||
}
|
||||
}
|
||||
copy(t.ents, t.ents[n:])
|
||||
for k := t.len() - n; k < t.len(); k++ {
|
||||
t.ents[k] = HeaderField{} // so strings can be garbage collected
|
||||
}
|
||||
t.ents = t.ents[:t.len()-n]
|
||||
if t.evictCount+uint64(n) < t.evictCount {
|
||||
panic("evictCount overflow")
|
||||
}
|
||||
t.evictCount += uint64(n)
|
||||
}
|
||||
|
||||
// search finds f in the table. If there is no match, i is 0.
|
||||
// If both name and value match, i is the matched index and nameValueMatch
|
||||
// becomes true. If only name matches, i points to that index and
|
||||
// nameValueMatch becomes false.
|
||||
//
|
||||
// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says
|
||||
// that index 1 should be the newest entry, but t.ents[0] is the oldest entry,
|
||||
// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic
|
||||
// table, the return value i actually refers to the entry t.ents[t.len()-i].
|
||||
//
|
||||
// All tables are assumed to be dynamic tables except for the global
|
||||
// staticTable pointer.
|
||||
//
|
||||
// See Section 2.3.3.
|
||||
func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
|
||||
if !f.Sensitive {
|
||||
if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 {
|
||||
return t.idToIndex(id), true
|
||||
}
|
||||
}
|
||||
if id := t.byName[f.Name]; id != 0 {
|
||||
return t.idToIndex(id), false
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// idToIndex converts a unique id to an HPACK index.
|
||||
// See Section 2.3.3.
|
||||
func (t *headerFieldTable) idToIndex(id uint64) uint64 {
|
||||
if id <= t.evictCount {
|
||||
panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount))
|
||||
}
|
||||
k := id - t.evictCount - 1 // convert id to an index t.ents[k]
|
||||
if t != staticTable {
|
||||
return uint64(t.len()) - k // dynamic table
|
||||
}
|
||||
return k + 1
|
||||
}
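A worked instance of the arithmetic above, with hypothetical numbers: if evictCount = 2 and the table holds three entries, ents[0..2] carry unique ids 3, 4 and 5. For a dynamic table the newest entry (id 5) maps to HPACK index 3 - (5 - 2 - 1) = 1 and the oldest (id 3) to index 3, matching HPACK's newest-first numbering, while the static table simply returns k + 1.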
|
||||
|
||||
// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
|
||||
var staticTable = newStaticTable()
|
||||
var staticTableEntries = [...]HeaderField{
|
||||
{Name: ":authority"},
|
||||
{Name: ":method", Value: "GET"},
|
||||
{Name: ":method", Value: "POST"},
|
||||
{Name: ":path", Value: "/"},
|
||||
{Name: ":path", Value: "/index.html"},
|
||||
{Name: ":scheme", Value: "http"},
|
||||
{Name: ":scheme", Value: "https"},
|
||||
{Name: ":status", Value: "200"},
|
||||
{Name: ":status", Value: "204"},
|
||||
{Name: ":status", Value: "206"},
|
||||
{Name: ":status", Value: "304"},
|
||||
{Name: ":status", Value: "400"},
|
||||
{Name: ":status", Value: "404"},
|
||||
{Name: ":status", Value: "500"},
|
||||
{Name: "accept-charset"},
|
||||
{Name: "accept-encoding", Value: "gzip, deflate"},
|
||||
{Name: "accept-language"},
|
||||
{Name: "accept-ranges"},
|
||||
{Name: "accept"},
|
||||
{Name: "access-control-allow-origin"},
|
||||
{Name: "age"},
|
||||
{Name: "allow"},
|
||||
{Name: "authorization"},
|
||||
{Name: "cache-control"},
|
||||
{Name: "content-disposition"},
|
||||
{Name: "content-encoding"},
|
||||
{Name: "content-language"},
|
||||
{Name: "content-length"},
|
||||
{Name: "content-location"},
|
||||
{Name: "content-range"},
|
||||
{Name: "content-type"},
|
||||
{Name: "cookie"},
|
||||
{Name: "date"},
|
||||
{Name: "etag"},
|
||||
{Name: "expect"},
|
||||
{Name: "expires"},
|
||||
{Name: "from"},
|
||||
{Name: "host"},
|
||||
{Name: "if-match"},
|
||||
{Name: "if-modified-since"},
|
||||
{Name: "if-none-match"},
|
||||
{Name: "if-range"},
|
||||
{Name: "if-unmodified-since"},
|
||||
{Name: "last-modified"},
|
||||
{Name: "link"},
|
||||
{Name: "location"},
|
||||
{Name: "max-forwards"},
|
||||
{Name: "proxy-authenticate"},
|
||||
{Name: "proxy-authorization"},
|
||||
{Name: "range"},
|
||||
{Name: "referer"},
|
||||
{Name: "refresh"},
|
||||
{Name: "retry-after"},
|
||||
{Name: "server"},
|
||||
{Name: "set-cookie"},
|
||||
{Name: "strict-transport-security"},
|
||||
{Name: "transfer-encoding"},
|
||||
{Name: "user-agent"},
|
||||
{Name: "vary"},
|
||||
{Name: "via"},
|
||||
{Name: "www-authenticate"},
|
||||
}
|
||||
|
||||
func newStaticTable() *headerFieldTable {
|
||||
t := &headerFieldTable{}
|
||||
t.init()
|
||||
for _, e := range staticTableEntries[:] {
|
||||
t.addEntry(e)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
var huffmanCodes = [256]uint32{
|
||||
0x1ff8,
|
||||
0x7fffd8,
|
||||
0xfffffe2,
|
||||
0xfffffe3,
|
||||
0xfffffe4,
|
||||
0xfffffe5,
|
||||
0xfffffe6,
|
||||
0xfffffe7,
|
||||
0xfffffe8,
|
||||
0xffffea,
|
||||
0x3ffffffc,
|
||||
0xfffffe9,
|
||||
0xfffffea,
|
||||
0x3ffffffd,
|
||||
0xfffffeb,
|
||||
0xfffffec,
|
||||
0xfffffed,
|
||||
0xfffffee,
|
||||
0xfffffef,
|
||||
0xffffff0,
|
||||
0xffffff1,
|
||||
0xffffff2,
|
||||
0x3ffffffe,
|
||||
0xffffff3,
|
||||
0xffffff4,
|
||||
0xffffff5,
|
||||
0xffffff6,
|
||||
0xffffff7,
|
||||
0xffffff8,
|
||||
0xffffff9,
|
||||
0xffffffa,
|
||||
0xffffffb,
|
||||
0x14,
|
||||
0x3f8,
|
||||
0x3f9,
|
||||
0xffa,
|
||||
0x1ff9,
|
||||
0x15,
|
||||
0xf8,
|
||||
0x7fa,
|
||||
0x3fa,
|
||||
0x3fb,
|
||||
0xf9,
|
||||
0x7fb,
|
||||
0xfa,
|
||||
0x16,
|
||||
0x17,
|
||||
0x18,
|
||||
0x0,
|
||||
0x1,
|
||||
0x2,
|
||||
0x19,
|
||||
0x1a,
|
||||
0x1b,
|
||||
0x1c,
|
||||
0x1d,
|
||||
0x1e,
|
||||
0x1f,
|
||||
0x5c,
|
||||
0xfb,
|
||||
0x7ffc,
|
||||
0x20,
|
||||
0xffb,
|
||||
0x3fc,
|
||||
0x1ffa,
|
||||
0x21,
|
||||
0x5d,
|
||||
0x5e,
|
||||
0x5f,
|
||||
0x60,
|
||||
0x61,
|
||||
0x62,
|
||||
0x63,
|
||||
0x64,
|
||||
0x65,
|
||||
0x66,
|
||||
0x67,
|
||||
0x68,
|
||||
0x69,
|
||||
0x6a,
|
||||
0x6b,
|
||||
0x6c,
|
||||
0x6d,
|
||||
0x6e,
|
||||
0x6f,
|
||||
0x70,
|
||||
0x71,
|
||||
0x72,
|
||||
0xfc,
|
||||
0x73,
|
||||
0xfd,
|
||||
0x1ffb,
|
||||
0x7fff0,
|
||||
0x1ffc,
|
||||
0x3ffc,
|
||||
0x22,
|
||||
0x7ffd,
|
||||
0x3,
|
||||
0x23,
|
||||
0x4,
|
||||
0x24,
|
||||
0x5,
|
||||
0x25,
|
||||
0x26,
|
||||
0x27,
|
||||
0x6,
|
||||
0x74,
|
||||
0x75,
|
||||
0x28,
|
||||
0x29,
|
||||
0x2a,
|
||||
0x7,
|
||||
0x2b,
|
||||
0x76,
|
||||
0x2c,
|
||||
0x8,
|
||||
0x9,
|
||||
0x2d,
|
||||
0x77,
|
||||
0x78,
|
||||
0x79,
|
||||
0x7a,
|
||||
0x7b,
|
||||
0x7ffe,
|
||||
0x7fc,
|
||||
0x3ffd,
|
||||
0x1ffd,
|
||||
0xffffffc,
|
||||
0xfffe6,
|
||||
0x3fffd2,
|
||||
0xfffe7,
|
||||
0xfffe8,
|
||||
0x3fffd3,
|
||||
0x3fffd4,
|
||||
0x3fffd5,
|
||||
0x7fffd9,
|
||||
0x3fffd6,
|
||||
0x7fffda,
|
||||
0x7fffdb,
|
||||
0x7fffdc,
|
||||
0x7fffdd,
|
||||
0x7fffde,
|
||||
0xffffeb,
|
||||
0x7fffdf,
|
||||
0xffffec,
|
||||
0xffffed,
|
||||
0x3fffd7,
|
||||
0x7fffe0,
|
||||
0xffffee,
|
||||
0x7fffe1,
|
||||
0x7fffe2,
|
||||
0x7fffe3,
|
||||
0x7fffe4,
|
||||
0x1fffdc,
|
||||
0x3fffd8,
|
||||
0x7fffe5,
|
||||
0x3fffd9,
|
||||
0x7fffe6,
|
||||
0x7fffe7,
|
||||
0xffffef,
|
||||
0x3fffda,
|
||||
0x1fffdd,
|
||||
0xfffe9,
|
||||
0x3fffdb,
|
||||
0x3fffdc,
|
||||
0x7fffe8,
|
||||
0x7fffe9,
|
||||
0x1fffde,
|
||||
0x7fffea,
|
||||
0x3fffdd,
|
||||
0x3fffde,
|
||||
0xfffff0,
|
||||
0x1fffdf,
|
||||
0x3fffdf,
|
||||
0x7fffeb,
|
||||
0x7fffec,
|
||||
0x1fffe0,
|
||||
0x1fffe1,
|
||||
0x3fffe0,
|
||||
0x1fffe2,
|
||||
0x7fffed,
|
||||
0x3fffe1,
|
||||
0x7fffee,
|
||||
0x7fffef,
|
||||
0xfffea,
|
||||
0x3fffe2,
|
||||
0x3fffe3,
|
||||
0x3fffe4,
|
||||
0x7ffff0,
|
||||
0x3fffe5,
|
||||
0x3fffe6,
|
||||
0x7ffff1,
|
||||
0x3ffffe0,
|
||||
0x3ffffe1,
|
||||
0xfffeb,
|
||||
0x7fff1,
|
||||
0x3fffe7,
|
||||
0x7ffff2,
|
||||
0x3fffe8,
|
||||
0x1ffffec,
|
||||
0x3ffffe2,
|
||||
0x3ffffe3,
|
||||
0x3ffffe4,
|
||||
0x7ffffde,
|
||||
0x7ffffdf,
|
||||
0x3ffffe5,
|
||||
0xfffff1,
|
||||
0x1ffffed,
|
||||
0x7fff2,
|
||||
0x1fffe3,
|
||||
0x3ffffe6,
|
||||
0x7ffffe0,
|
||||
0x7ffffe1,
|
||||
0x3ffffe7,
|
||||
0x7ffffe2,
|
||||
0xfffff2,
|
||||
0x1fffe4,
|
||||
0x1fffe5,
|
||||
0x3ffffe8,
|
||||
0x3ffffe9,
|
||||
0xffffffd,
|
||||
0x7ffffe3,
|
||||
0x7ffffe4,
|
||||
0x7ffffe5,
|
||||
0xfffec,
|
||||
0xfffff3,
|
||||
0xfffed,
|
||||
0x1fffe6,
|
||||
0x3fffe9,
|
||||
0x1fffe7,
|
||||
0x1fffe8,
|
||||
0x7ffff3,
|
||||
0x3fffea,
|
||||
0x3fffeb,
|
||||
0x1ffffee,
|
||||
0x1ffffef,
|
||||
0xfffff4,
|
||||
0xfffff5,
|
||||
0x3ffffea,
|
||||
0x7ffff4,
|
||||
0x3ffffeb,
|
||||
0x7ffffe6,
|
||||
0x3ffffec,
|
||||
0x3ffffed,
|
||||
0x7ffffe7,
|
||||
0x7ffffe8,
|
||||
0x7ffffe9,
|
||||
0x7ffffea,
|
||||
0x7ffffeb,
|
||||
0xffffffe,
|
||||
0x7ffffec,
|
||||
0x7ffffed,
|
||||
0x7ffffee,
|
||||
0x7ffffef,
|
||||
0x7fffff0,
|
||||
0x3ffffee,
|
||||
}
|
||||
|
||||
var huffmanCodeLen = [256]uint8{
|
||||
13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,
|
||||
28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,
|
||||
6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6,
|
||||
5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10,
|
||||
13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
|
||||
7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6,
|
||||
15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5,
|
||||
6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28,
|
||||
20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23,
|
||||
24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24,
|
||||
22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23,
|
||||
21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23,
|
||||
26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25,
|
||||
19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27,
|
||||
20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23,
|
||||
26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26,
|
||||
}
|