package format
import (
"bytes"
"fmt"
"github.com/joyme123/thrift-ls/parser"
)
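// MustFormatAnnotations formats an annotation block such as (key = "value", ...).
// Annotations that start on a new line in the source are written on their own lines
// with an extra level of indentation.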
func MustFormatAnnotations(annotations *parser.Annotations) string {
buf := bytes.NewBuffer(nil)
buf.WriteString(MustFormatKeyword(annotations.LParKeyword.Keyword))
var preNode parser.Node
preNode = annotations.LParKeyword
indent := ""
isNewLine := false
for i, anno := range annotations.Annotations {
if lineDistance(preNode, annotations.Annotations[i]) >= 1 {
buf.WriteString("\n")
isNewLine = true
indent = Indent + Indent
}
buf.WriteString(MustFormatAnnotation(anno, i == len(annotations.Annotations)-1, i == 0, indent, isNewLine))
preNode = annotations.Annotations[i]
isNewLine = false
indent = ""
}
if lineDistance(preNode, annotations.RParKeyword) >= 1 {
buf.WriteString("\n")
buf.WriteString(Indent)
}
buf.WriteString(MustFormatKeyword(annotations.RParKeyword.Keyword))
return buf.String()
}
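// MustFormatAnnotation formats a single annotation as `name = "value"`, appending its
// list separator (if any) unless it is the last annotation, and prefixing a space
// unless it is the first annotation or starts a new line.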
func MustFormatAnnotation(anno *parser.Annotation, isLast bool, isFirst bool, indent string, isNewLine bool) string {
sep := ""
if (!isLast) && anno.ListSeparatorKeyword != nil {
sep = MustFormatKeyword(anno.ListSeparatorKeyword.Keyword)
}
space := ""
if (!isFirst) && (!isNewLine) {
space = " "
}
// a = "xxxx",
return fmt.Sprintf("%s%s %s %s%s", space, MustFormatIdentifier(anno.Identifier, indent), MustFormatKeyword(anno.EqualKeyword.Keyword), MustFormatLiteral(anno.Value, ""), sep)
}
package format
import (
"bytes"
"strings"
"github.com/joyme123/thrift-ls/parser"
)
// TODO(jpf): multi-line comments still need better handling of line breaks
// MustFormatComments formats comments.
// The returned string does not end with '\n'.
func MustFormatComments(comments []*parser.Comment, indent string) string {
fmtCtx := &fmtContext{}
buf := bytes.NewBuffer(nil)
for _, c := range comments {
if fmtCtx.preNode != nil {
buf.WriteString("\n")
if lineDistance(fmtCtx.preNode, c) > 1 {
buf.WriteString("\n")
}
}
buf.WriteString(formatMultiLineComment(c.Text, indent))
fmtCtx.preNode = c
}
return buf.String()
}
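// MustFormatEndLineComments formats comments that trail a definition on the same line.
// The result starts with a single space; it is empty when there are no comments.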
func MustFormatEndLineComments(comments []*parser.Comment, indent string) string {
if len(comments) == 0 {
return ""
}
fmtCtx := &fmtContext{}
buf := bytes.NewBufferString(" ")
for _, c := range comments {
if fmtCtx.preNode != nil {
buf.WriteString("\n")
if lineDistance(fmtCtx.preNode, c) > 1 {
buf.WriteString("\n")
}
}
buf.WriteString(formatMultiLineComment(c.Text, indent))
fmtCtx.preNode = c
}
return buf.String()
}
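// formatMultiLineComment indents a comment. Line comments ("//", "#") are returned as a
// single indented line; block comments are re-indented line by line, with continuation
// lines that start with '*' aligned by one extra space.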
func formatMultiLineComment(comment string, indent string) string {
comment = strings.TrimSpace(comment)
if strings.HasPrefix(comment, "//") || strings.HasPrefix(comment, "#") {
return indent + comment
}
lines := strings.Split(comment, "\n")
if len(lines) == 1 {
return indent + comment
}
buf := bytes.NewBuffer(nil)
for i, line := range lines {
line = strings.TrimSpace(line)
space := ""
if strings.HasPrefix(line, "*") {
space = " "
}
if i == 0 {
buf.WriteString(indent + space + line)
} else {
buf.WriteString("\n" + indent + space + line)
}
}
return buf.String()
}
package format
import (
"github.com/joyme123/thrift-ls/parser"
)
const constOneLineTpl = `{{.Comments}}{{.Const}} {{.Type}} {{.Name}} {{.Equal}} {{.Value}}{{.Annotations}}{{.ListSeparator}}{{.EndLineComments}}
`
type ConstFormatter struct {
Comments string
Const string
Type string
Name string
Annotations string
Equal string
Value string
ListSeparator string
EndLineComments string
}
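// MustFormatConst formats a const definition on a single line, for example
// (illustrative): const i32 MAX_RETRY = 3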
func MustFormatConst(cst *parser.Const) string {
comments, annos := formatCommentsAndAnnos(cst.Comments, cst.Annotations, "")
if len(cst.Comments) > 0 && lineDistance(cst.Comments[len(cst.Comments)-1], cst.ConstKeyword) > 1 {
comments = comments + "\n"
}
sep := ""
if cst.ListSeparatorKeyword != nil {
sep = MustFormatKeyword(cst.ListSeparatorKeyword.Keyword)
}
f := &ConstFormatter{
Comments: comments,
Const: MustFormatKeyword(cst.ConstKeyword.Keyword),
Type: MustFormatFieldType(cst.ConstType),
Name: MustFormatIdentifier(cst.Name, ""),
Annotations: annos,
Equal: MustFormatKeyword(cst.EqualKeyword.Keyword),
Value: MustFormatConstValue(cst.Value, "", false),
ListSeparator: sep,
EndLineComments: MustFormatEndLineComments(cst.EndLineComments, ""),
}
return MustFormat(constOneLineTpl, f)
}
package format
import (
"bytes"
"fmt"
"github.com/joyme123/thrift-ls/parser"
)
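// MustFormatConstValue formats a const value of any supported type (list, map, pair,
// identifier, string, i64, double), preserving line breaks from the source where
// lineDistance indicates them.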
func MustFormatConstValue(cv *parser.ConstValue, indent string, newLine bool) string {
buf := bytes.NewBuffer(nil)
if len(cv.Comments) > 0 {
buf.WriteString(MustFormatComments(cv.Comments, indent))
}
sep := ""
if cv.ListSeparatorKeyword != nil {
sep = MustFormatKeyword(cv.ListSeparatorKeyword.Keyword) + " "
}
switch cv.TypeName {
case "list":
values := cv.Value.([]*parser.ConstValue)
if len(cv.Comments) > 0 && len(values) > 0 {
if lineDistance(cv.Comments[len(cv.Comments)-1], values[0]) >= 1 {
buf.WriteString("\n")
}
}
buf.WriteString(MustFormatKeyword(cv.LBrkKeyword.Keyword))
for i := range values {
// TODO(jpf): improve how this is displayed
newLine = false
buf.WriteString(MustFormatConstValue(values[i], indent, newLine))
}
buf.WriteString(MustFormatKeyword(cv.RBrkKeyword.Keyword))
case "map":
values := cv.Value.([]*parser.ConstValue)
if len(cv.Comments) > 0 && len(values) > 0 {
if lineDistance(cv.Comments[len(cv.Comments)-1], values[0]) >= 1 {
buf.WriteString("\n")
}
}
var preNode parser.Node
buf.WriteString(MustFormatKeyword(cv.LCurKeyword.Keyword))
preNode = cv.LCurKeyword
for i := range values {
distance := lineDistance(preNode, values[i])
if distance >= 1 {
buf.WriteString("\n")
newLine = true
} else {
if i > 0 {
buf.WriteString(" ")
}
newLine = false
}
buf.WriteString(MustFormatConstValue(values[i], indent, newLine))
preNode = values[i]
}
if lineDistance(preNode, cv.RCurKeyword) >= 1 {
buf.WriteString("\n")
buf.WriteString(indent)
}
buf.WriteString(MustFormatKeyword(cv.RCurKeyword.Keyword))
case "pair":
key := cv.Key.(*parser.ConstValue)
value := cv.Value.(*parser.ConstValue)
if len(cv.Comments) > 0 {
if lineDistance(cv.Comments[len(cv.Comments)-1], key) >= 1 {
buf.WriteString("\n")
}
}
if cv.ListSeparatorKeyword != nil {
sep = MustFormatKeyword(cv.ListSeparatorKeyword.Keyword)
}
buf.WriteString(fmt.Sprintf("%s%s %s%s",
MustFormatConstValue(key, indent+Indent, newLine),
MustFormatKeyword(cv.ColonKeyword.Keyword),
MustFormatConstValue(value, indent, false),
sep))
case "identifier":
if len(cv.Comments) > 0 {
// special case for line distance
if lineDistance(cv.Comments[len(cv.Comments)-1], cv) >= 1 {
buf.WriteString("\n")
newLine = true
}
}
if newLine {
buf.WriteString(indent)
}
buf.WriteString(fmt.Sprintf("%s%s", cv.Value.(string), sep))
case "string":
val := ""
if _, ok := cv.Value.(string); ok {
if len(cv.Comments) > 0 {
if lineDistance(cv.Comments[len(cv.Comments)-1], cv) >= 1 {
buf.WriteString("\n")
newLine = true
}
}
if newLine {
buf.WriteString(indent)
}
val = cv.Value.(string)
buf.WriteString(fmt.Sprintf("%q%s", val, sep))
} else {
literal := cv.Value.(*parser.Literal)
if len(cv.Comments) > 0 {
if lineDistance(cv.Comments[len(cv.Comments)-1], literal) >= 1 {
buf.WriteString("\n")
newLine = true
}
}
if !newLine {
indent = ""
}
val = MustFormatLiteral(literal, indent)
buf.WriteString(fmt.Sprintf("%s%s", val, sep))
}
case "i64":
if len(cv.Comments) > 0 {
if lineDistance(cv.Comments[len(cv.Comments)-1], cv) >= 1 {
buf.WriteString("\n")
newLine = true
}
}
if newLine {
buf.WriteString(indent)
}
buf.WriteString(fmt.Sprintf("%s%s", cv.ValueInText, sep))
case "double":
if len(cv.Comments) > 0 {
if lineDistance(cv.Comments[len(cv.Comments)-1], cv) >= 1 {
buf.WriteString("\n")
newLine = true
}
}
if newLine {
buf.WriteString(indent)
}
buf.WriteString(fmt.Sprintf("%s%s", cv.ValueInText, sep))
}
return buf.String()
}
package format
import (
"bytes"
"fmt"
"strings"
"github.com/joyme123/thrift-ls/parser"
)
type fmtContext struct {
// preNode records the previously printed node. We can use preNode as printing context:
// if preNode is a const or typedef and the current node is also a const or typedef, the '\n' should be ignored
preNode parser.Node
}
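// FormatDocument formats a parsed thrift document without self validation.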
func FormatDocument(doc *parser.Document) (string, error) {
return FormatDocumentWithValidation(doc, false)
}
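// FormatDocumentWithValidation formats a parsed thrift document. It returns BadNodeError
// if the document contains bad nodes. When selfValidation is true, the formatted result
// is re-parsed and compared with the input AST, and an error is returned if they differ.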
func FormatDocumentWithValidation(doc *parser.Document, selfValidation bool) (string, error) {
if doc.ChildrenBadNode() {
return "", BadNodeError
}
buf := bytes.NewBuffer(nil)
fmtCtx := &fmtContext{}
writeBuf := func(node parser.Node, addtionalLine bool) {
if addtionalLine {
if len(buf.Bytes()) > 0 && buf.Bytes()[buf.Len()-1] != '\n' {
// if the buffer doesn't already end with '\n', add one before the blank line
buf.WriteString("\n")
}
buf.WriteString("\n")
}
switch node.Type() {
case "Include":
buf.WriteString(MustFormatInclude(node.(*parser.Include)))
case "CPPInclude":
buf.WriteString(MustFormatCPPInclude(node.(*parser.CPPInclude)))
case "Namespace":
buf.WriteString(MustFormatNamespace(node.(*parser.Namespace)))
case "Struct":
buf.WriteString(MustFormatStruct(node.(*parser.Struct)))
case "Union":
buf.WriteString(MustFormatUnion(node.(*parser.Union)))
case "Exception":
buf.WriteString(MustFormatException(node.(*parser.Exception)))
case "Service":
buf.WriteString(MustFormatService(node.(*parser.Service)))
case "Typedef":
buf.WriteString(MustFormatTypedef(node.(*parser.Typedef)))
case "Const":
buf.WriteString(MustFormatConst(node.(*parser.Const)))
case "Enum":
buf.WriteString(MustFormatEnum(node.(*parser.Enum)))
}
}
for _, node := range doc.Nodes {
addtionalLine := needAddtionalLineInDocument(fmtCtx.preNode, node)
writeBuf(node, addtionalLine)
fmtCtx.preNode = node
}
if len(doc.Comments) > 0 {
buf.WriteString(MustFormatComments(doc.Comments, ""))
}
res := buf.String()
res = strings.TrimSpace(res)
if selfValidation {
psr := parser.PEGParser{}
formattedAst, err := psr.Parse("formatted.thrift", []byte(res))
if err != nil {
return "", fmt.Errorf("format error: format result failed to parse, error msg: %v. Please report bug to author at https://github.com/joyme123/thrift-ls/issues", err)
}
if !doc.Equals(formattedAst) {
return "", fmt.Errorf("format error: format result failed to pass self validation. Please report bug to author at https://github.com/joyme123/thrift-ls/issues")
}
}
return res, nil
}
var (
header = map[string]struct{}{
"Include": {},
"CPPInclude": {},
"Namespace": {},
}
onelineDefinition = map[string]struct{}{
"Const": {},
"Typedef": {},
}
multiLineDefinition = map[string]struct{}{
"Struct": {},
"Union": {},
"Exception": {},
"Service": {},
"Typedef": {},
"Const": {},
"Enum": {},
}
)
func isHeader(node parser.Node) bool {
_, ok := header[node.Type()]
return ok
}
func isOneLineDefinition(node parser.Node) bool {
_, ok := onelineDefinition[node.Type()]
return ok
}
func isMultiLineDefinition(node parser.Node) bool {
_, ok := multiLineDefinition[node.Type()]
return ok
}
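// needAddtionalLineInDocument reports whether an empty line should be written before
// currentNode. Two headers of the same type, or two one-line definitions, keep an empty
// line only if the source already had one; all other adjacent nodes get an empty line.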
func needAddtionalLineInDocument(preNode parser.Node, currentNode parser.Node) bool {
if preNode == nil {
return false
}
if isHeader(preNode) && isHeader(currentNode) {
if preNode.Type() == currentNode.Type() {
if lineDistance(preNode, currentNode) > 1 {
return true
}
return false
}
return true
}
if isOneLineDefinition(preNode) && isOneLineDefinition(currentNode) {
// if preNode and currentNode have one or more empty lines between them, we should preserve
// one empty line
if lineDistance(preNode, currentNode) > 1 {
return true
}
return false
}
return true
}
package format
import (
"github.com/joyme123/thrift-ls/parser"
)
const (
enumOneLineTpl = `{{.Comments}}{{.Enum}} {{.Identifier}} {{.LCUR}}{{.RCUR}}{{.Annotations}}{{.EndLineComments}}`
enumMultiLineTpl = `{{.Comments}}{{.Enum}} {{.Identifier}} {{.LCUR}}
{{.EnumValues}}{{.RCUR}}{{.Annotations}}{{.EndLineComments}}
`
)
type EnumFormatter struct {
Comments string
Enum string
Identifier string
LCUR string
EnumValues string
RCUR string
Annotations string
EndLineComments string
}
func MustFormatEnum(enum *parser.Enum) string {
comments, annos := formatCommentsAndAnnos(enum.Comments, enum.Annotations, "")
if len(enum.Comments) > 0 && lineDistance(enum.Comments[len(enum.Comments)-1], enum.EnumKeyword) > 1 {
comments = comments + "\n"
}
f := EnumFormatter{
Comments: comments,
Enum: MustFormatKeyword(enum.EnumKeyword.Keyword),
Identifier: MustFormatIdentifier(enum.Name, ""),
LCUR: MustFormatKeyword(enum.LCurKeyword.Keyword),
EnumValues: MustFormatEnumValues(enum.Values, Indent),
RCUR: MustFormatKeyword(enum.RCurKeyword.Keyword),
Annotations: annos,
EndLineComments: MustFormatEndLineComments(enum.EndLineComments, ""),
}
if len(enum.Values) > 0 {
return MustFormat(enumMultiLineTpl, f)
}
return MustFormat(enumOneLineTpl, f)
}
package format
import (
"bytes"
"fmt"
"text/tabwriter"
"github.com/joyme123/thrift-ls/parser"
)
type enumValueGroup []string
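// MustFormatEnumValues formats enum values. Values that are adjacent in the source form
// a group that is aligned with a tabwriter; an empty line in the source starts a new
// group, and groups are separated by an empty line in the output.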
func MustFormatEnumValues(values []*parser.EnumValue, indent string) string {
buf := bytes.NewBuffer(nil)
fmtCtx := &fmtContext{}
var enumValueGroups []enumValueGroup
var eg enumValueGroup
for i, v := range values {
if needAddtionalLineForEnumValues(fmtCtx.preNode, values[i]) {
enumValueGroups = append(enumValueGroups, eg)
eg = make(enumValueGroup, 0)
}
space := " "
if Align == AlignTypeField {
space = "\t"
}
eg = append(eg, MustFormatEnumValue(v, space, indent))
fmtCtx.preNode = values[i]
}
if len(eg) > 0 {
enumValueGroups = append(enumValueGroups, eg)
}
for i, eg := range enumValueGroups {
w := new(tabwriter.Writer)
w.Init(buf, 1, 8, 1, ' ', tabwriter.TabIndent)
for j := range eg {
fmt.Fprintln(w, eg[j])
}
w.Flush()
if i < len(enumValueGroups)-1 {
buf.WriteString("\n")
}
}
return buf.String()
}
func MustFormatEnumValue(enumValue *parser.EnumValue, space, indent string) string {
comments, annos := formatCommentsAndAnnos(enumValue.Comments, enumValue.Annotations, indent)
if len(comments) > 0 && lineDistance(enumValue.Comments[len(enumValue.Comments)-1], enumValue.Name) > 1 {
comments = comments + "\n"
}
buf := bytes.NewBufferString(comments)
buf.WriteString(indent + MustFormatIdentifier(enumValue.Name, ""))
if enumValue.ValueNode != nil {
equalSpace := space
if Align == AlignTypeAssign {
equalSpace = "\t"
}
buf.WriteString(fmt.Sprintf("%s%s%s%s", equalSpace, MustFormatKeyword(enumValue.EqualKeyword.Keyword), equalSpace, MustFormatConstValue(enumValue.ValueNode, indent, false)))
}
buf.WriteString(annos)
if FieldLineComma == FieldLineCommaAdd {
buf.WriteString(",")
} else if FieldLineComma == FieldLineCommaDisable {
if enumValue.ListSeparatorKeyword != nil {
buf.WriteString(MustFormatKeyword(enumValue.ListSeparatorKeyword.Keyword))
}
}
buf.WriteString(MustFormatEndLineComments(enumValue.EndLineComments, ""))
return buf.String()
}
func needAddtionalLineForEnumValues(preNode, curNode parser.Node) bool {
if preNode == nil {
return false
}
curValue := curNode.(*parser.EnumValue)
var curStartLine int
if len(curValue.Comments) > 0 {
curStartLine = curValue.Comments[0].Pos().Line
} else {
curStartLine = curValue.Name.Pos().Line
}
return curStartLine-preNode.End().Line > 1
}
package format
import (
"github.com/joyme123/thrift-ls/parser"
)
const (
exceptionOneLineTpl = `{{.Comments}}{{.Exception}} {{.Identifier}} {{.LCUR}}{{.RCUR}}{{.Annotations}}{{.EndLineComments}}`
exceptionMultiLineTpl = `{{.Comments}}{{.Exception}} {{.Identifier}} {{.LCUR}}
{{.Fields}}{{.RCUR}}{{.Annotations}}{{.EndLineComments}}
`
)
type ExceptionFormatter struct {
Comments string
Exception string
Identifier string
LCUR string
Fields string
RCUR string
Annotations string
EndLineComments string
}
func MustFormatException(excep *parser.Exception) string {
comments, annos := formatCommentsAndAnnos(excep.Comments, excep.Annotations, "")
if len(excep.Comments) > 0 && lineDistance(excep.Comments[len(excep.Comments)-1], excep.ExceptionKeyword) > 1 {
comments = comments + "\n"
}
f := ExceptionFormatter{
Comments: comments,
Exception: MustFormatKeyword(excep.ExceptionKeyword.Keyword),
Identifier: MustFormatIdentifier(excep.Name, ""),
LCUR: MustFormatKeyword(excep.LCurKeyword.Keyword),
Fields: MustFormatFields(excep.Fields, Indent),
RCUR: MustFormatKeyword(excep.RCurKeyword.Keyword),
Annotations: annos,
EndLineComments: MustFormatEndLineComments(excep.EndLineComments, ""),
}
if len(excep.Fields) > 0 {
return MustFormat(exceptionMultiLineTpl, f)
}
return MustFormat(exceptionOneLineTpl, f)
}
package format
import (
"bytes"
"fmt"
"strings"
"text/tabwriter"
"github.com/joyme123/thrift-ls/parser"
)
type fieldGroup []string
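// MustFormatFields formats struct/union/exception fields. Fields that are adjacent in
// the source form a group that is aligned with a tabwriter; an empty line in the source
// starts a new group, and groups are separated by an empty line in the output.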
func MustFormatFields(fields []*parser.Field, indent string) string {
buf := bytes.NewBuffer(nil)
fmtCtx := &fmtContext{}
var fieldGroups []fieldGroup
var fg fieldGroup
for _, field := range fields {
if needAddtionalLineForFields(fmtCtx.preNode, field) {
fieldGroups = append(fieldGroups, fg)
fg = make(fieldGroup, 0)
}
space := " "
if Align == AlignTypeField {
space = "\t"
}
fg = append(fg, MustFormatField(field, space, indent, false))
fmtCtx.preNode = field
}
if len(fg) > 0 {
fieldGroups = append(fieldGroups, fg)
}
for i, fg := range fieldGroups {
w := new(tabwriter.Writer)
w.Init(buf, 1, 8, 1, ' ', tabwriter.TabIndent)
for j := range fg {
fmt.Fprintln(w, fg[j])
}
w.Flush()
if i < len(fieldGroups)-1 {
buf.WriteString("\n")
}
}
return buf.String()
}
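// MustFormatOneLineFields formats fields on a single line separated by spaces.
// It is used for function arguments and throws clauses.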
func MustFormatOneLineFields(fields []*parser.Field) string {
buf := bytes.NewBuffer(nil)
for i, field := range fields {
buf.WriteString(MustFormatField(field, " ", "", true))
if i < len(fields)-1 {
buf.WriteString(" ")
}
}
return buf.String()
}
func MustFormatField(field *parser.Field, space string, indent string, oneline bool) string {
comments, annos := formatCommentsAndAnnos(field.Comments, field.Annotations, indent)
if len(field.Comments) > 0 && lineDistance(field.Comments[len(field.Comments)-1], field.Index) > 1 {
comments = comments + "\n"
}
buf := bytes.NewBuffer([]byte(comments))
required := ""
if field.RequiredKeyword != nil {
required = MustFormatKeyword(field.RequiredKeyword.Keyword) + space
}
value := ""
if field.ConstValue != nil {
equalSpace := space
if Align == AlignTypeAssign {
equalSpace = "\t"
}
value = fmt.Sprintf("%s%s%s%s", equalSpace, MustFormatKeyword(field.EqualKeyword.Keyword), equalSpace, MustFormatConstValue(field.ConstValue, indent, false))
}
str := fmt.Sprintf("%s%d:%s%s%s%s%s%s", indent, field.Index.Value, space, required, MustFormatFieldType(field.FieldType), space, field.Identifier.Name.Text, value)
buf.WriteString(str)
buf.WriteString(annos)
if FieldLineComma == FieldLineCommaAdd && !oneline {
buf.WriteString(",")
} else if FieldLineComma == FieldLineCommaDisable || oneline {
buf.WriteString(formatListSeparator(field.ListSeparatorKeyword))
}
if len(field.EndLineComments) > 0 {
buf.WriteString(MustFormatEndLineComments(field.EndLineComments, ""))
}
// remove space at end of line
return strings.TrimRight(buf.String(), " ")
}
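// MustFormatFieldType formats a field type, including the type parameters of map, set
// and list types and any trailing annotations. It returns "" for a nil type.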
func MustFormatFieldType(ft *parser.FieldType) string {
if ft == nil {
return ""
}
annos := ""
if ft.Annotations != nil {
annos = MustFormatAnnotations(ft.Annotations)
if len(ft.Annotations.Annotations) > 0 {
annos = " " + annos
}
}
tn := MustFormatTypeName(ft.TypeName)
switch ft.TypeName.Name {
case "map":
return fmt.Sprintf("%s<%s,%s>%s", tn, MustFormatFieldType(ft.KeyType), MustFormatFieldType(ft.ValueType), annos)
case "set":
return fmt.Sprintf("%s<%s>%s", tn, MustFormatFieldType(ft.KeyType), annos)
case "list":
return fmt.Sprintf("%s<%s>%s", tn, MustFormatFieldType(ft.KeyType), annos)
default:
return tn + annos
}
}
func MustFormatTypeName(tn *parser.TypeName) string {
comments := MustFormatComments(tn.Comments, "")
if len(tn.Comments) > 0 {
comments = comments + " "
}
return comments + tn.Name
}
func needAddtionalLineForFields(preNode, curNode parser.Node) bool {
if preNode == nil {
return false
}
curField := curNode.(*parser.Field)
var curStartLine int
if len(curField.Comments) > 0 {
curStartLine = curField.Comments[0].Pos().Line
} else {
if curField.Index != nil {
curStartLine = curField.Index.Pos().Line
} else if curField.RequiredKeyword != nil {
curStartLine = curField.RequiredKeyword.Pos().Line
} else {
curStartLine = curField.FieldType.Pos().Line
}
}
return curStartLine-preNode.End().Line > 1
}
package format
import (
"bytes"
"github.com/joyme123/thrift-ls/parser"
)
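// MustFormatFunctions formats service functions one per line, keeping an empty line
// between functions that were separated by an empty line in the source.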
func MustFormatFunctions(fns []*parser.Function, indent string) string {
buf := bytes.NewBuffer(nil)
fmtCtx := &fmtContext{}
for i := range fns {
if needAddtionalLineForFuncs(fmtCtx.preNode, fns[i]) {
buf.WriteString("\n")
}
buf.WriteString(MustFormatFunction(fns[i], indent))
if i < len(fns)-1 {
buf.WriteString("\n")
}
fmtCtx.preNode = fns[i]
}
return buf.String()
}
const functionTpl = "{{.Oneway}}{{.FunctionType}} {{.Identifier}}{{.LPAR}}{{.Args}}{{.RPAR}}{{.Throws}}{{.Annotations}}{{.ListSeparator}}{{.EndLineComments}}"
type FunctionFormatter struct {
Oneway string
FunctionType string
Identifier string
LPAR string
Args string
RPAR string
Throws string
Annotations string
ListSeparator string
EndLineComments string
}
func MustFormatFunction(fn *parser.Function, indent string) string {
comments, annos := formatCommentsAndAnnos(fn.Comments, fn.Annotations, indent)
var firstNode parser.Node
if fn.Void != nil {
firstNode = fn.Void
} else {
firstNode = fn.FunctionType
}
if len(fn.Comments) > 0 && lineDistance(fn.Comments[len(fn.Comments)-1], firstNode) > 1 {
comments = comments + "\n"
}
oneway := ""
if fn.Oneway != nil {
oneway = "oneway "
}
args := ""
if len(fn.Arguments) > 0 {
args = MustFormatOneLineFields(fn.Arguments)
}
ft := ""
if fn.Void != nil {
ft = MustFormatKeyword(fn.Void.Keyword)
} else {
ft = MustFormatFieldType(fn.FunctionType)
}
sep := ""
if FieldLineComma == FieldLineCommaAdd { // add comma always
sep = ","
} else if FieldLineComma == FieldLineCommaDisable { // add list separator
if fn.ListSeparatorKeyword != nil {
sep = MustFormatKeyword(fn.ListSeparatorKeyword.Keyword)
}
} // otherwise, sep will be removed
throws := MustFormatThrows(fn.Throws)
if fn.Throws != nil {
throws = " " + throws
}
f := &FunctionFormatter{
Oneway: oneway,
FunctionType: ft,
Identifier: MustFormatIdentifier(fn.Name, ""),
LPAR: MustFormatKeyword(fn.LParKeyword.Keyword),
Args: args,
RPAR: MustFormatKeyword(fn.RParKeyword.Keyword),
Throws: throws,
Annotations: annos,
ListSeparator: sep,
EndLineComments: MustFormatEndLineComments(fn.EndLineComments, ""),
}
fnStr := MustFormat(functionTpl, f)
fnStr = comments + indent + fnStr
return fnStr
}
const throwTpl = "{{.Throw}} {{.LPAR}}{{.Fields}}{{.RPAR}}"
type ThrowFormatter struct {
Throw string
LPAR string
Fields string
RPAR string
}
func MustFormatThrows(throws *parser.Throws) string {
if throws == nil {
return ""
}
args := ""
if len(throws.Fields) > 0 {
args = MustFormatOneLineFields(throws.Fields)
}
f := &ThrowFormatter{
Throw: MustFormatKeyword(throws.ThrowsKeyword.Keyword),
LPAR: MustFormatKeyword(throws.LParKeyword.Keyword),
Fields: args,
RPAR: MustFormatKeyword(throws.RParKeyword.Keyword),
}
return MustFormat(throwTpl, f)
}
func needAddtionalLineForFuncs(preNode, curNode parser.Node) bool {
if preNode == nil {
return false
}
curFunc := curNode.(*parser.Function)
var curStartLine int
if len(curFunc.Comments) > 0 {
curStartLine = curFunc.Comments[0].Pos().Line
} else {
if curFunc.FunctionType != nil {
curStartLine = curFunc.FunctionType.Pos().Line
} else if curFunc.Void != nil {
curStartLine = curFunc.Void.Pos().Line
} else {
curStartLine = curFunc.Name.Pos().Line
}
}
return curStartLine-preNode.End().Line > 1
}
package format
import (
"fmt"
"github.com/joyme123/thrift-ls/parser"
)
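// MustFormatIdentifier formats an identifier preceded by its comments. The comments are
// kept on their own line when they end on an earlier line than the identifier.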
func MustFormatIdentifier(id *parser.Identifier, indent string) string {
comments := MustFormatComments(id.Comments, indent)
if comments != "" {
if lineDistance(id.Comments[len(id.Comments)-1], id.Name) >= 1 {
comments = comments + "\n"
} else {
comments = comments + " "
}
}
return fmt.Sprintf("%s%s", comments, indent+id.Name.Text)
}
package format
import (
"github.com/joyme123/thrift-ls/parser"
)
const includeTpl = "{{.Comments}}{{.Include}} {{.Path}}{{.EndLineComments}}\n"
type IncludeFormatter struct {
Comments string
Include string
Path string
EndLineComments string
}
func MustFormatInclude(inc *parser.Include) string {
comments, _ := formatCommentsAndAnnos(inc.Comments, nil, "")
if len(inc.Comments) > 0 && lineDistance(inc.Comments[len(inc.Comments)-1], inc.IncludeKeyword) > 1 {
comments = comments + "\n"
}
f := &IncludeFormatter{
Comments: comments,
Include: MustFormatKeyword(inc.IncludeKeyword.Keyword),
Path: MustFormatLiteral(inc.Path, ""),
EndLineComments: MustFormatComments(inc.EndLineComments, ""),
}
return MustFormat(includeTpl, f)
}
func MustFormatCPPInclude(inc *parser.CPPInclude) string {
comments, _ := formatCommentsAndAnnos(inc.Comments, nil, "")
if len(inc.Comments) > 0 && lineDistance(inc.Comments[len(inc.Comments)-1], inc.CPPIncludeKeyword) > 1 {
comments = comments + "\n"
}
f := &IncludeFormatter{
Comments: comments,
Include: MustFormatKeyword(inc.CPPIncludeKeyword.Keyword),
Path: MustFormatLiteral(inc.Path, ""),
EndLineComments: MustFormatComments(inc.EndLineComments, ""),
}
return MustFormat(includeTpl, f)
}
package format
import (
"bytes"
"github.com/joyme123/thrift-ls/parser"
)
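// MustFormatKeyword returns the keyword literal, preceded by its formatted comments
// when present.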
func MustFormatKeyword(kw parser.Keyword) string {
if len(kw.Comments) > 0 {
buf := bytes.NewBuffer(nil)
buf.WriteString(MustFormatComments(kw.Comments, ""))
if lineDistance(kw.Comments[len(kw.Comments)-1], kw.Literal) >= 1 {
buf.WriteString("\n")
} else {
buf.WriteString(" ")
}
buf.WriteString(kw.Literal.Text)
return buf.String()
}
return kw.Literal.Text
}
package format
import (
"bytes"
"fmt"
"github.com/joyme123/thrift-ls/parser"
)
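// MustFormatLiteral formats a quoted literal, preceded by its comments when present.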
func MustFormatLiteral(l *parser.Literal, indent string) string {
if len(l.Comments) > 0 {
buf := bytes.NewBuffer(nil)
buf.WriteString(MustFormatComments(l.Comments, indent))
if lineDistance(l.Comments[len(l.Comments)-1], l.Value) >= 1 {
buf.WriteString("\n")
buf.WriteString(indent)
} else {
buf.WriteString(" ")
}
buf.WriteString(fmt.Sprintf("%s%s%s", l.Quote, l.Value.Text, l.Quote))
return buf.String()
}
return indent + fmt.Sprintf("%s%s%s", l.Quote, l.Value.Text, l.Quote)
}
package format
import (
"github.com/joyme123/thrift-ls/parser"
)
const namespaceOneLineTpl = "{{.Comments}}{{.Namespace}} {{.Language}} {{.Name}}{{.Annotations}}{{.EndLineComments}}\n"
type NamespaceFormatter struct {
Comments string
Namespace string
Language string
Name string
Annotations string
EndLineComments string
}
func MustFormatNamespace(ns *parser.Namespace) string {
comments, annos := formatCommentsAndAnnos(ns.Comments, ns.Annotations, "")
if len(ns.Comments) > 0 && lineDistance(ns.Comments[len(ns.Comments)-1], ns.NamespaceKeyword) > 1 {
comments = comments + "\n"
}
f := &NamespaceFormatter{
Comments: comments,
Namespace: MustFormatKeyword(ns.NamespaceKeyword.Keyword),
Language: MustFormatIdentifier(&ns.Language.Identifier, ""),
Name: MustFormatIdentifier(ns.Name, ""),
Annotations: annos,
EndLineComments: MustFormatEndLineComments(ns.EndLineComments, ""),
}
return MustFormat(namespaceOneLineTpl, f)
}
package format
import (
"flag"
"strconv"
"strings"
)
var Indent = "    " // default indent: 4 spaces
var Align = "field"
var FieldLineComma = "disable"
type Options struct {
// Do not print reformatted sources to standard output.
// If a file's formatting is different from thriftls's, overwrite it
// with thriftls's version.
Write bool `yaml:"rewrite"`
// Indent to use. Support: nspace(s), ntab(s). example: 4spaces, 1tab, tab
// if indent format is invalid or not specified, default is 4spaces
Indent string `yaml:"indent"`
// Do not print reformatted sources to standard output.
// If a file's formatting is different from thriftls's, print diffs
// to standard output.
Diff bool `yaml:"diff"`
// Align controls the alignment of struct/enum/exception/union fields
// Options: "field", "assign", "disable"
// Default is "field" if not set
Align string `yaml:"alignByAssign"`
// FieldLineComma controls whether to add or remove the comma at the end of a field line.
// Options: "add", "remove", "disable"
// If "disable" is chosen, user input is retained without modification.
// Default is "disable" if not set
FieldLineComma string `yaml:"fieldLineComma"`
}
func (o *Options) SetFlags() {
flag.BoolVar(&o.Write, "w", false, "Do not print reformatted sources to standard output. If a file's formatting is different from thriftls's, overwrite it with thriftls's version.")
flag.BoolVar(&o.Diff, "d", false, "Do not print reformatted sources to standard output. If a file's formatting is different from thriftls's, print diffs to standard output.")
flag.StringVar(&o.Indent, "indent", "4spaces", "Indent to use. Support: num*space, num*tab. example: 4spaces, 1tab, tab")
flag.StringVar(&o.Align, "align", "field", `Align sets the alignment option for struct/enum/exception/union fields. Options: "field", "assign", "disable". Default is "field" if not set.`)
flag.StringVar(&o.FieldLineComma, "fieldLineComma", "disable", `FieldLineComma controls whether to add or remove the comma at the end of a field line. Options: "add", "remove", "disable". If "disable" is chosen, user input is retained without modification. Default is "disable" if not set.`)
}
func (o *Options) InitDefault() {
Indent = o.GetIndent()
if o.Align == "" || (o.Align != AlignTypeField && o.Align != AlignTypeAssign && o.Align != AlignTypeDisable) {
o.Align = "field"
}
Align = o.Align
if o.FieldLineComma == "" ||
(o.FieldLineComma != FieldLineCommaAdd &&
o.FieldLineComma != FieldLineCommaRemove &&
o.FieldLineComma != FieldLineCommaDisable) {
o.FieldLineComma = "disable"
}
FieldLineComma = o.FieldLineComma
}
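// GetIndent converts the Indent option into the literal indent string, for example
// (illustrative): "2spaces" -> "  ", "1tab" or "tab" -> "\t". An invalid or empty value
// falls back to four spaces.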
func (o *Options) GetIndent() string {
if o.Indent == "" {
o.Indent = "4spaces"
}
indent := o.Indent
suffixes := []string{"spaces", "space", "tabs", "tab"}
for _, suffix := range suffixes {
if strings.HasSuffix(indent, suffix) {
char := ""
if strings.HasPrefix(suffix, "tab") {
	char = "\t"
} else {
	char = " "
}
num := 1
numStr := strings.TrimSuffix(indent, suffix)
if len(numStr) == 0 {
num = 1
} else {
num, _ = strconv.Atoi(numStr)
if num == 0 {
num = 4
char = " "
}
}
return strings.Repeat(char, num)
}
}
return " "
}
package format
import (
"github.com/joyme123/thrift-ls/parser"
)
const (
serviceOneLineTpl = `{{.Comments}}{{.Service}} {{.Identifier}}{{.Extends}}{{.ExtendServiceName}} {{.LCUR}}{{.RCUR}}{{.Annotations}}{{.EndLineComments}}`
serviceMultiLineTpl = `{{.Comments}}{{.Service}} {{.Identifier}}{{.Extends}}{{.ExtendServiceName}} {{.LCUR}}
{{.Functions}}
{{.RCUR}}{{.Annotations}}{{.EndLineComments}}
`
)
type ServiceFormatter struct {
Comments string
Service string
Identifier string
LCUR string
Functions string
RCUR string
Annotations string
EndLineComments string
Extends string
ExtendServiceName string
}
func MustFormatService(svc *parser.Service) string {
comments, annos := formatCommentsAndAnnos(svc.Comments, svc.Annotations, "")
if len(svc.Comments) > 0 && lineDistance(svc.Comments[len(svc.Comments)-1], svc.ServiceKeyword) > 1 {
comments = comments + "\n"
}
f := ServiceFormatter{
Comments: comments,
Service: MustFormatKeyword(svc.ServiceKeyword.Keyword),
Identifier: MustFormatIdentifier(svc.Name, ""),
LCUR: MustFormatKeyword(svc.LCurKeyword.Keyword),
Functions: MustFormatFunctions(svc.Functions, Indent),
RCUR: MustFormatKeyword(svc.RCurKeyword.Keyword),
Annotations: annos,
EndLineComments: MustFormatEndLineComments(svc.EndLineComments, ""),
}
if svc.ExtendsKeyword != nil {
f.Extends = " " + MustFormatKeyword(svc.ExtendsKeyword.Keyword)
}
if svc.Extends != nil {
f.ExtendServiceName = " " + MustFormatIdentifier(svc.Extends, "")
}
if len(svc.Functions) > 0 {
return MustFormat(serviceMultiLineTpl, f)
}
return MustFormat(serviceOneLineTpl, f)
}
package format
import (
"github.com/joyme123/thrift-ls/parser"
)
const (
structOneLineTpl = `{{.Comments}}{{.Struct}} {{.Identifier}} {{.LCUR}}{{.RCUR}}{{.Annotations}}{{.EndLineComments}}`
structMultiLineTpl = `{{.Comments}}{{.Struct}} {{.Identifier}} {{.LCUR}}
{{.Fields}}{{.RCUR}}{{.Annotations}}{{.EndLineComments}}
`
)
type StructFormatter struct {
Comments string
Struct string
Identifier string
LCUR string
Fields string
RCUR string
Annotations string
EndLineComments string
}
func MustFormatStruct(st *parser.Struct) string {
comments, annos := formatCommentsAndAnnos(st.Comments, st.Annotations, "")
if len(st.Comments) > 0 && lineDistance(st.Comments[len(st.Comments)-1], st.StructKeyword) > 1 {
comments = comments + "\n"
}
f := StructFormatter{
Comments: comments,
Struct: MustFormatKeyword(st.StructKeyword.Keyword),
Identifier: MustFormatIdentifier(st.Identifier, ""),
LCUR: MustFormatKeyword(st.LCurKeyword.Keyword),
Fields: MustFormatFields(st.Fields, Indent),
RCUR: MustFormatKeyword(st.RCurKeyword.Keyword),
Annotations: annos,
EndLineComments: MustFormatEndLineComments(st.EndLineComments, ""),
}
if len(st.Fields) > 0 {
return MustFormat(structMultiLineTpl, f)
}
return MustFormat(structOneLineTpl, f)
}
package format
import (
"github.com/joyme123/thrift-ls/parser"
)
const typedefOneLineTpl = `{{.Comments}}{{.Typedef}} {{.Type}} {{.Name}}{{.Annotations}}{{.EndLineComments}}
`
type TypedefFormatter struct {
Comments string
Typedef string
Type string
Name string
Annotations string
EndLineComments string
}
func MustFormatTypedef(td *parser.Typedef) string {
comments, annos := formatCommentsAndAnnos(td.Comments, td.Annotations, "")
if len(td.Comments) > 0 && lineDistance(td.Comments[len(td.Comments)-1], td.TypedefKeyword) > 1 {
comments = comments + "\n"
}
f := &TypedefFormatter{
Comments: comments,
Typedef: MustFormatKeyword(td.TypedefKeyword.Keyword),
Type: MustFormatFieldType(td.T),
Name: MustFormatIdentifier(td.Alias, ""),
Annotations: annos,
EndLineComments: MustFormatEndLineComments(td.EndLineComments, ""),
}
return MustFormat(typedefOneLineTpl, f)
}
package format
import (
"github.com/joyme123/thrift-ls/parser"
)
const (
unionOneLineTpl = `{{.Comments}}{{.Union}} {{.Identifier}} {{.LCUR}}{{.RCUR}}{{.Annotations}}{{.EndLineComments}}`
unionMultiLineTpl = `{{.Comments}}{{.Union}} {{.Identifier}} {{.LCUR}}
{{.Fields}}{{.RCUR}}{{.Annotations}}{{.EndLineComments}}
`
)
type UnionFormatter struct {
Comments string
Union string
Identifier string
LCUR string
Fields string
RCUR string
Annotations string
EndLineComments string
}
func MustFormatUnion(union *parser.Union) string {
comments, annos := formatCommentsAndAnnos(union.Comments, union.Annotations, "")
if len(union.Comments) > 0 && lineDistance(union.Comments[len(union.Comments)-1], union.UnionKeyword) > 1 {
comments = comments + "\n"
}
f := UnionFormatter{
Comments: comments,
Union: MustFormatKeyword(union.UnionKeyword.Keyword),
Identifier: MustFormatIdentifier(union.Name, ""),
LCUR: MustFormatKeyword(union.LCurKeyword.Keyword),
Fields: MustFormatFields(union.Fields, Indent),
RCUR: MustFormatKeyword(union.RCurKeyword.Keyword),
Annotations: annos,
EndLineComments: MustFormatEndLineComments(union.EndLineComments, ""),
}
if len(union.Fields) > 0 {
return MustFormat(unionMultiLineTpl, f)
}
return MustFormat(unionOneLineTpl, f)
}
package format
import (
"bytes"
"fmt"
"text/template"
"unicode"
"github.com/joyme123/thrift-ls/parser"
)
const (
AlignTypeAssign = "assign"
AlignTypeField = "field"
AlignTypeDisable = "disable"
)
const (
FieldLineCommaAdd = "add"
FieldLineCommaRemove = "remove"
FieldLineCommaDisable = "disable"
)
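// MustFormat renders tplText as a text/template with formatter as its data and panics
// if the template fails to parse or execute.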
func MustFormat(tplText string, formatter any) string {
tpl, err := template.New("default").Parse(tplText)
if err != nil {
panic(err)
}
buf := bytes.NewBuffer(nil)
err = tpl.Execute(buf, formatter)
if err != nil {
panic(err)
}
return buf.String()
}
func formatCommentsAndAnnos(comments []*parser.Comment, annotations *parser.Annotations, indent string) (string, string) {
commentsStr := ""
if len(comments) > 0 {
commentsStr = MustFormatComments(comments, indent) + "\n"
}
annos := ""
if annotations != nil && len(annotations.Annotations) > 0 {
annos = " " + MustFormatAnnotations(annotations)
}
return commentsStr, annos
}
func formatListSeparator(sep *parser.ListSeparatorKeyword) string {
if sep == nil {
return ""
}
return MustFormatKeyword(sep.Keyword)
}
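// lineDistance returns the number of lines between the end of preNode and the start of
// currentNode; 0 means they are on the same line.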
func lineDistance(preNode parser.Node, currentNode parser.Node) int {
return currentNode.Pos().Line - preNode.End().Line
}
// EqualsAfterFormat is used to judge whether a document has been changed after formatting.
// Implementation: ignore all space characters when comparing the two strings.
func EqualsAfterFormat(doc1, doc2 string) error {
cur1, cur2 := 0, 0
runes1 := []rune(doc1)
runes2 := []rune(doc2)
for cur1 < len(runes1) && cur2 < len(runes2) {
for cur1 < len(runes1) && unicode.IsSpace(runes1[cur1]) {
cur1++
}
for cur2 < len(runes2) && unicode.IsSpace(runes2[cur2]) {
cur2++
}
if cur1 >= len(runes1) || cur2 >= len(runes2) {
break
}
if runes1[cur1] != runes2[cur2] {
return fmt.Errorf("different at doc1: %s as %d, doc2: %s at %d, str1: %s, str2: %s", string(runes1[cur1]), cur1, string(runes2[cur2]), cur2, showStringContext(runes1, cur1, 40), showStringContext(runes2, cur2, 40))
}
cur1++
cur2++
}
for cur1 < len(runes1) {
	if !unicode.IsSpace(runes1[cur1]) {
		return fmt.Errorf("doc1 has extra non-space content at %d: %s", cur1, showStringContext(runes1, cur1, 40))
	}
	cur1++
}
for cur2 < len(runes2) {
	if !unicode.IsSpace(runes2[cur2]) {
		return fmt.Errorf("doc2 has extra non-space content at %d: %s", cur2, showStringContext(runes2, cur2, 40))
	}
	cur2++
}
return nil
}
func showStringContext(text []rune, offset int, n int) string {
start := offset - n
if start < 0 {
start = 0
}
end := offset + n
if end >= len(text) {
end = len(text) - 1
}
return string(text[start : end+1])
}
package log
import (
"os"
log "github.com/sirupsen/logrus"
)
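// Init opens (or creates) thriftls.log in the system temp directory, sets the logrus
// level, and redirects log output to that file. It panics if the file cannot be opened.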
func Init(logLevel int) {
file := os.TempDir() + "/thriftls.log"
logFile, err := os.OpenFile(file, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0766)
if err != nil {
panic(err)
}
log.SetLevel(log.Level(logLevel))
log.SetOutput(logFile) // direct log output to the file
}
type Logger struct {
}
package cache
import (
"reflect"
"strconv"
"sync/atomic"
"github.com/joyme123/thrift-ls/lsp/memoize"
)
type Cache struct {
id string
store *memoize.Store
*memoizedFS
}
var cacheIndex int64
func New(store *memoize.Store) *Cache {
index := atomic.AddInt64(&cacheIndex, 1)
if store == nil {
store = &memoize.Store{}
}
c := &Cache{
id: strconv.FormatInt(index, 10),
store: store,
memoizedFS: &memoizedFS{filesByID: map[FileID][]*DiskFile{}},
}
return c
}
func (c *Cache) ID() string { return c.id }
func (c *Cache) MemStats() map[reflect.Type]int { return c.store.Stats() }
package cache
import (
"bytes"
"context"
"crypto/sha256"
"fmt"
"sync"
"time"
"github.com/joyme123/protocol"
"go.lsp.dev/uri"
)
// A FileID uniquely identifies a file in the file system.
//
// If GetFileID(name1) returns the same ID as GetFileID(name2), the two file
// names denote the same file.
// A FileID is comparable, and thus suitable for use as a map key.
type FileID struct {
device, inode uint64
}
// GetFileID returns the file system's identifier for the file, and its
// modification time.
// Like os.Stat, it reads through symbolic links.
func GetFileID(filename string) (FileID, time.Time, error) { return getFileID(filename) }
type Hash [sha256.Size]byte
// HashOf returns the hash of some data.
func HashOf(data []byte) Hash {
return Hash(sha256.Sum256(data))
}
// Hashf returns the hash of a printf-formatted string.
func Hashf(format string, args ...interface{}) Hash {
// Although this looks alloc-heavy, it is faster than using
// Fprintf on sha256.New() because the allocations don't escape.
return HashOf([]byte(fmt.Sprintf(format, args...)))
}
// String returns the digest as a string of hex digits.
func (h Hash) String() string {
return fmt.Sprintf("%64x", [sha256.Size]byte(h))
}
// Less returns true if the given hash is less than the other.
func (h Hash) Less(other Hash) bool {
return bytes.Compare(h[:], other[:]) < 0
}
// XORWith updates *h to *h XOR h2.
func (h *Hash) XORWith(h2 Hash) {
// Small enough that we don't need crypto/subtle.XORBytes.
for i := range h {
h[i] ^= h2[i]
}
}
// FileIdentity uniquely identifies a file at a version from a FileSystem.
type FileIdentity struct {
URI uri.URI
Hash Hash // digest of file contents
}
func (id FileIdentity) String() string {
return fmt.Sprintf("%s%s", id.URI, id.Hash)
}
// A FileHandle represents the URI, content, hash, and optional
// version of a file tracked by the LSP session.
//
// File content may be provided by the file system (for Saved files)
// or from an overlay, for open files with unsaved edits.
// A FileHandle may record an attempt to read a non-existent file,
// in which case Content returns an error.
type FileHandle interface {
// URI is the URI for this file handle.
// TODO(rfindley): this is not actually well-defined. In some cases, there
// may be more than one URI that resolve to the same FileHandle. Which one is
// this?
URI() uri.URI
// FileIdentity returns a FileIdentity for the file, even if there was an
// error reading it.
FileIdentity() FileIdentity
// Saved reports whether the file has the same content on disk:
// it is false for files open on an editor with unsaved edits.
Saved() bool
// Version returns the file version, as defined by the LSP client.
// For on-disk file handles, Version returns 0.
Version() int32
// Content returns the contents of a file.
// If the file is not available, returns a nil slice and an error.
Content() ([]byte, error)
}
// A FileSource maps URIs to FileHandles.
type FileSource interface {
// ReadFile returns the FileHandle for a given URI, either by
// reading the content of the file or by obtaining it from a cache.
ReadFile(ctx context.Context, uri uri.URI) (FileHandle, error)
}
// FilesMap holds files on disk and overlay files
type FilesMap struct {
mu sync.RWMutex
files map[uri.URI]FileHandle
overlays map[uri.URI]*Overlay
}
func (m *FilesMap) Get(key uri.URI) (FileHandle, bool) {
m.mu.RLock()
defer m.mu.RUnlock()
fh, ok := m.files[key]
return fh, ok
}
func (m *FilesMap) Set(key uri.URI, file FileHandle) {
m.mu.Lock()
defer m.mu.Unlock()
m.files[key] = file
if o, ok := file.(*Overlay); ok {
m.overlays[key] = o
}
}
func (m *FilesMap) Forget(key uri.URI) {
m.mu.Lock()
defer m.mu.Unlock()
delete(m.files, key)
delete(m.overlays, key)
}
func (m *FilesMap) Clone() *FilesMap {
m.mu.RLock()
defer m.mu.RUnlock()
newMap := &FilesMap{
files: make(map[uri.URI]FileHandle),
overlays: make(map[uri.URI]*Overlay),
}
for key := range m.files {
newMap.files[key] = m.files[key]
}
for key := range m.overlays {
newMap.overlays[key] = m.overlays[key]
}
return newMap
}
func (m *FilesMap) Destroy() {
m.files = nil
m.overlays = nil
}
type FileChangeType string
const (
FileChangeTypeInitialize FileChangeType = "Initialize"
FileChangeTypeDidOpen FileChangeType = "DidOpen"
FileChangeTypeDidChange FileChangeType = "DidChange"
FileChangeTypeDidSave FileChangeType = "DidSave"
)
type FileChange struct {
URI uri.URI
Version int
Content []byte
From FileChangeType
}
func (f *FileChange) FullContent(base []byte) []byte {
// only support full change now
return f.Content
}
func FileChangeFromLSPDidChange(params *protocol.DidChangeTextDocumentParams) []*FileChange {
changes := make([]*FileChange, 0, len(params.ContentChanges))
for i := range params.ContentChanges {
changes = append(changes, &FileChange{
URI: params.TextDocument.TextDocumentIdentifier.URI,
Version: int(params.TextDocument.Version),
Content: []byte(params.ContentChanges[i].Text),
From: FileChangeTypeDidChange,
})
}
return changes
}
//go:build !windows && !plan9
// +build !windows,!plan9
package cache
import (
"os"
"syscall"
"time"
)
func getFileID(filename string) (FileID, time.Time, error) {
fi, err := os.Stat(filename)
if err != nil {
return FileID{}, time.Time{}, err
}
stat := fi.Sys().(*syscall.Stat_t)
return FileID{
device: uint64(stat.Dev), // (int32 on darwin, uint64 on linux)
inode: stat.Ino,
}, fi.ModTime(), nil
}
package cache
import (
"context"
"os"
"sync"
"time"
"go.lsp.dev/uri"
)
// A memoizedFS is a file source that memoizes reads, to reduce IO.
type memoizedFS struct {
mu sync.Mutex
// filesByID maps existing file inodes to the result of a read.
// (The read may have failed, e.g. due to EACCES or a delete between stat+read.)
// Each slice is a non-empty list of aliases: different URIs.
filesByID map[FileID][]*DiskFile
}
func newMemoizedFS() *memoizedFS {
return &memoizedFS{filesByID: make(map[FileID][]*DiskFile)}
}
// A DiskFile is a file on the filesystem, or a failure to read one.
// It implements the source.FileHandle interface.
type DiskFile struct {
uri uri.URI
modTime time.Time
content []byte
hash Hash
err error
}
func (h *DiskFile) URI() uri.URI { return h.uri }
func (h *DiskFile) FileIdentity() FileIdentity {
return FileIdentity{
URI: h.uri,
Hash: h.hash,
}
}
func (h *DiskFile) Saved() bool { return true }
func (h *DiskFile) Version() int32 { return 0 }
func (h *DiskFile) Content() ([]byte, error) { return h.content, h.err }
// ReadFile stats and (maybe) reads the file, updates the cache, and returns it.
func (fs *memoizedFS) ReadFile(ctx context.Context, uri uri.URI) (FileHandle, error) {
id, mtime, err := GetFileID(uri.Filename())
if err != nil {
// file does not exist
return &DiskFile{
err: err,
uri: uri,
}, nil
}
// We check if the file has changed by comparing modification times. Notably,
// this is an imperfect heuristic as various systems have low resolution
// mtimes (as much as 1s on WSL or s390x builders), so we only cache
// filehandles if mtime is old enough to be reliable, meaning that we don't
// expect a subsequent write to have the same mtime.
//
// The coarsest mtime precision we've seen in practice is 1s, so consider
// mtime to be unreliable if it is less than 2s old. Capture this before
// doing anything else.
recentlyModified := time.Since(mtime) < 2*time.Second
fs.mu.Lock()
fhs, ok := fs.filesByID[id]
if ok && fhs[0].modTime.Equal(mtime) {
var fh *DiskFile
// We have already seen this file and it has not changed.
for _, h := range fhs {
if h.uri == uri {
fh = h
break
}
}
// No file handle for this exact URI. Create an alias, but share content.
if fh == nil {
newFH := *fhs[0]
newFH.uri = uri
fh = &newFH
fhs = append(fhs, fh)
fs.filesByID[id] = fhs
}
fs.mu.Unlock()
return fh, nil
}
fs.mu.Unlock()
// Unknown file, or file has changed. Read (or re-read) it.
fh, err := readFile(ctx, uri, mtime) // ~25us
if err != nil {
return nil, err // e.g. cancelled (not: read failed)
}
fs.mu.Lock()
if !recentlyModified {
fs.filesByID[id] = []*DiskFile{fh}
} else {
delete(fs.filesByID, id)
}
fs.mu.Unlock()
return fh, nil
}
// ioLimit limits the number of parallel file reads per process.
var ioLimit = make(chan struct{}, 128)
func readFile(ctx context.Context, uri uri.URI, mtime time.Time) (*DiskFile, error) {
select {
case ioLimit <- struct{}{}:
case <-ctx.Done():
return nil, ctx.Err()
}
defer func() { <-ioLimit }()
// It is possible that a race causes us to read a file with different file
// ID, or whose mtime differs from the given mtime. However, in these cases
// we expect the client to notify of a subsequent file change, and the file
// content should be eventually consistent.
content, err := os.ReadFile(uri.Filename()) // ~20us
if err != nil {
content = nil // just in case
}
return &DiskFile{
modTime: mtime,
uri: uri,
content: content,
hash: HashOf(content),
err: err,
}, nil
}
package cache
import (
"context"
"sync"
log "github.com/sirupsen/logrus"
"go.lsp.dev/uri"
)
// An overlayFS is a source.FileSource that keeps track of overlays on top of a
// delegate FileSource.
type overlayFS struct {
delegate FileSource
mu sync.Mutex
overlays map[uri.URI]*Overlay
}
func NewOverlayFS(delegate FileSource) *overlayFS {
return &overlayFS{
delegate: delegate,
overlays: make(map[uri.URI]*Overlay),
}
}
// Overlays returns a new unordered array of overlays.
func (fs *overlayFS) Overlays() []*Overlay {
fs.mu.Lock()
defer fs.mu.Unlock()
overlays := make([]*Overlay, 0, len(fs.overlays))
for _, overlay := range fs.overlays {
overlays = append(overlays, overlay)
}
return overlays
}
func (fs *overlayFS) ReadFile(ctx context.Context, uri uri.URI) (FileHandle, error) {
log.Debug("read uri: ", uri)
fs.mu.Lock()
overlay, ok := fs.overlays[uri]
fs.mu.Unlock()
if ok {
return overlay, nil
}
return fs.delegate.ReadFile(ctx, uri)
}
// Update only updates overlays
func (fs *overlayFS) Update(ctx context.Context, changes []*FileChange) error {
for _, change := range changes {
var base []byte
if change.From == FileChangeTypeDidChange {
fh, err := fs.ReadFile(ctx, change.URI)
if err != nil {
return err
}
base, err = fh.Content()
if err != nil {
return err
}
}
overlay := NewOverlay(change.URI, change.FullContent(base), int32(change.Version))
log.Debug("new overlay content: ", string(overlay.content), "uri", change.URI)
fs.mu.Lock()
fs.overlays[change.URI] = overlay
fs.mu.Unlock()
}
return nil
}
// An Overlay is a file open in the editor. It may have unsaved edits.
// It implements the source.FileHandle interface.
type Overlay struct {
uri uri.URI
content []byte
hash Hash
version int32
// saved is true if a file matches the state on disk,
// and therefore does not need to be part of the overlay sent to go/packages.
saved bool
}
func NewOverlay(uri uri.URI, content []byte, version int32) *Overlay {
return &Overlay{
uri: uri,
content: content,
version: version,
hash: HashOf(content),
}
}
func (o *Overlay) URI() uri.URI { return o.uri }
func (o *Overlay) FileIdentity() FileIdentity {
return FileIdentity{
URI: o.uri,
Hash: o.hash,
}
}
func (o *Overlay) Content() ([]byte, error) { return o.content, nil }
func (o *Overlay) Version() int32 { return o.version }
func (o *Overlay) Saved() bool { return o.saved }
package cache
import (
"sort"
"sync"
"github.com/joyme123/thrift-ls/lsp/lsputils"
"github.com/joyme123/thrift-ls/parser"
log "github.com/sirupsen/logrus"
"go.lsp.dev/uri"
)
type IncludeNode struct {
indegree []uri.URI // uri of nodes which include this node
outdegree []uri.URI // includes
}
func (n *IncludeNode) Clone() *IncludeNode {
newNode := &IncludeNode{}
if len(n.indegree) > 0 {
newNode.indegree = make([]uri.URI, len(n.indegree))
}
if len(n.outdegree) > 0 {
newNode.outdegree = make([]uri.URI, len(n.outdegree))
}
copy(newNode.indegree, n.indegree)
copy(newNode.outdegree, n.outdegree)
return newNode
}
func (n *IncludeNode) InDegree() []uri.URI {
return n.indegree
}
func (n *IncludeNode) OutDegree() []uri.URI {
return n.outdegree
}
type IncludeGraph struct {
mu sync.RWMutex
mapper map[uri.URI]*IncludeNode
}
func NewIncludeGraph() *IncludeGraph {
return &IncludeGraph{
mapper: make(map[uri.URI]*IncludeNode),
}
}
func (g *IncludeGraph) Get(file uri.URI) *IncludeNode {
g.mu.RLock()
defer g.mu.RUnlock()
return g.mapper[file]
}
func (g *IncludeGraph) Set(file uri.URI, includes []*parser.Include) {
g.mu.Lock()
defer g.mu.Unlock()
includeURIs := make([]uri.URI, 0, len(includes))
for _, inc := range includes {
if inc.BadNode || inc.Path == nil || inc.Path.ChildrenBadNode() || inc.Path.Value == nil {
continue
}
includeURI := lsputils.IncludeURI(file, inc.Path.Value.Text)
includeURIs = append(includeURIs, includeURI)
}
sort.SliceStable(includeURIs, func(i, j int) bool {
return includeURIs[i] < includeURIs[j]
})
node, ok := g.mapper[file]
if ok {
if len(includeURIs) == len(node.outdegree) {
sort.SliceStable(node.outdegree, func(i, j int) bool {
return node.outdegree[i] < node.outdegree[j]
})
equal := true
for i := range includeURIs {
if includeURIs[i] != node.outdegree[i] {
equal = false
break
}
}
if equal {
return
}
}
g.removeWithoutLock(file)
} else {
node = &IncludeNode{}
}
for _, inc := range includeURIs {
node.outdegree = append(node.outdegree, inc)
outNode, exist := g.mapper[inc]
if !exist {
outNode = &IncludeNode{}
g.mapper[inc] = outNode
}
outNode.indegree = append(outNode.indegree, file)
}
g.mapper[file] = node
return
}
func (g *IncludeGraph) Remove(file uri.URI) {
g.mu.Lock()
defer g.mu.Unlock()
g.removeWithoutLock(file)
}
func (g *IncludeGraph) Clone() *IncludeGraph {
g.mu.RLock()
defer g.mu.RUnlock()
newG := NewIncludeGraph()
for i := range g.mapper {
newG.mapper[i] = g.mapper[i].Clone()
}
return newG
}
func (g *IncludeGraph) removeWithoutLock(file uri.URI) {
node, ok := g.mapper[file]
if !ok {
return
}
for _, outFile := range node.outdegree {
outNode, exist := g.mapper[outFile]
if !exist {
continue
}
// update outNode indegree
for i := range outNode.indegree {
if outNode.indegree[i] == file {
outNode.indegree = append(outNode.indegree[0:i], outNode.indegree[i+1:]...)
if len(outNode.indegree) == 0 {
outNode.indegree = nil
}
break
}
}
if len(outNode.indegree) == 0 && len(outNode.outdegree) == 0 {
delete(g.mapper, outFile)
}
}
node.outdegree = nil
if len(node.indegree) == 0 && len(node.outdegree) == 0 {
delete(g.mapper, file)
}
}
func (g *IncludeGraph) Debug() {
for file, node := range g.mapper {
log.Debugln("file: ", file, "node: ", node)
}
}
package cache
import (
"encoding/json"
"fmt"
"sync"
"github.com/joyme123/thrift-ls/lsp/mapper"
"github.com/joyme123/thrift-ls/parser"
log "github.com/sirupsen/logrus"
"go.lsp.dev/uri"
)
type ParseCaches struct {
mu sync.RWMutex
caches map[uri.URI]*ParsedFile
tokens map[string]struct{}
}
func NewParseCaches() *ParseCaches {
return &ParseCaches{
caches: make(map[uri.URI]*ParsedFile),
}
}
func (c *ParseCaches) Set(filePath uri.URI, res *ParsedFile) {
c.mu.Lock()
c.caches[filePath] = res
c.tokens = nil
c.mu.Unlock()
}
func (c *ParseCaches) Get(filePath uri.URI) *ParsedFile {
c.mu.RLock()
defer c.mu.RUnlock()
return c.caches[filePath]
}
func (c *ParseCaches) Forget(filePath uri.URI) {
c.mu.Lock()
defer c.mu.Unlock()
delete(c.caches, filePath)
c.tokens = nil
}
func (c *ParseCaches) Clone() *ParseCaches {
c.mu.RLock()
defer c.mu.RUnlock()
clone := make(map[uri.URI]*ParsedFile)
for i := range c.caches {
clone[i] = c.caches[i]
}
newCaches := &ParseCaches{
caches: clone,
}
return newCaches
}
func (c *ParseCaches) Tokens() map[string]struct{} {
if len(c.tokens) > 0 {
return c.tokens
}
tokens := make(map[string]struct{})
for _, parsed := range c.caches {
if parsed.ast == nil {
continue
}
for _, item := range parsed.ast.Includes {
if item.Path == nil || item.Path.BadNode {
continue
}
tokens[item.Name()] = struct{}{}
}
for _, item := range parsed.ast.Enums {
if item.Name == nil || item.Name.BadNode || item.Name.Name == nil || item.Name.Name.BadNode {
continue
}
tokens[item.Name.Name.Text] = struct{}{}
for i := range item.Values {
if item.Values[i].BadNode || item.Values[i].Name == nil || item.Values[i].Name.BadNode ||
item.Values[i].Name.Name == nil || item.Values[i].Name.Name.BadNode {
continue
}
tokens[item.Values[i].Name.Name.Text] = struct{}{}
}
}
for _, item := range parsed.ast.Consts {
if item.Name == nil || item.Name.BadNode || item.Name.Name == nil || item.Name.Name.BadNode {
continue
}
tokens[item.Name.Name.Text] = struct{}{}
}
for _, item := range parsed.ast.Typedefs {
if item.Alias == nil || item.Alias.BadNode || item.Alias.Name == nil || item.Alias.Name.BadNode {
continue
}
tokens[item.Alias.Name.Text] = struct{}{}
}
for _, item := range parsed.ast.Services {
if item.Name == nil || item.Name.BadNode {
continue
}
tokens[item.Name.Name.Text] = struct{}{}
}
for _, item := range parsed.ast.Unions {
if item.Name == nil || item.Name.BadNode {
continue
}
tokens[item.Name.Name.Text] = struct{}{}
for i := range item.Fields {
if item.Fields[i].BadNode || item.Fields[i].Identifier == nil || item.Fields[i].Identifier.Name == nil || item.Fields[i].Identifier.Name.BadNode {
continue
}
tokens[item.Fields[i].Identifier.Name.Text] = struct{}{}
}
}
for _, item := range parsed.ast.Structs {
if item.Identifier == nil || item.Identifier.BadNode || item.Identifier.Name == nil || item.Identifier.Name.BadNode {
continue
}
tokens[item.Identifier.Name.Text] = struct{}{}
for _, field := range item.Fields {
if field.BadNode || field.Identifier == nil || field.Identifier.BadNode || field.Identifier.Name == nil || field.Identifier.Name.BadNode {
continue
}
tokens[field.Identifier.Name.Text] = struct{}{}
}
}
for _, item := range parsed.ast.Exceptions {
if item.Name == nil || item.Name.BadNode || item.Name.Name == nil || item.Name.Name.BadNode {
continue
}
tokens[item.Name.Name.Text] = struct{}{}
for i := range item.Fields {
if item.Fields[i].BadNode || item.Fields[i].Identifier == nil || item.Fields[i].Identifier.Name == nil || item.Fields[i].Identifier.Name.BadNode {
continue
}
tokens[item.Fields[i].Identifier.Name.Text] = struct{}{}
}
}
}
c.tokens = tokens
return tokens
}
type ParsedFile struct {
fh FileHandle
// ast is the latest available AST. The current fh content may not parse successfully,
// so ast may be nil when the fh content is invalid
ast *parser.Document
mapper *mapper.Mapper
// errs hold all ast parsing errors
errs []parser.ParserError
}
func (p *ParsedFile) Mapper() *mapper.Mapper {
return p.mapper
}
func (p *ParsedFile) AST() *parser.Document {
return p.ast
}
func (p *ParsedFile) Errors() []parser.ParserError {
return p.errs
}
func (p *ParsedFile) AggregatedError() error {
if len(p.errs) == 0 {
return nil
}
return fmt.Errorf("aggregated error: %v", p.errs)
}
// DumpAST prints the AST as indented JSON; it is intended for debugging
func (p *ParsedFile) DumpAST() {
if p.ast == nil {
return
}
data, _ := json.MarshalIndent(p.ast, "", " ")
fmt.Println(string(data))
}
// TODO(jpf): use promise
func Parse(fh FileHandle) (*ParsedFile, error) {
content, err := fh.Content()
if err != nil {
return nil, err
}
pf := &ParsedFile{
fh: fh,
}
psr := &parser.PEGParser{}
ast, errs := psr.Parse(fh.URI().Filename(), content)
for i := range errs {
parserErr, ok := errs[i].(parser.ParserError)
if ok {
pf.errs = append(pf.errs, parserErr)
}
}
pf.ast = ast
if len(errs) > 0 {
log.Debugf("peg parsed err: %v", errs)
}
mp := mapper.NewMapper(fh.URI(), content)
pf.mapper = mp
return pf, nil
}
// type ParseError struct {
// Pos Position
// Msg string
// }
//
// func (e *ParseError) Error() string {
// if e.Pos.Filename != "" || e.Pos.IsValid() {
// // don't print "<unknown position>"
// // TODO(gri) reconsider the semantics of Position.IsValid
// return e.Pos.String() + ": " + e.Msg
// }
// return e.Msg
// }
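// Position describes an arbitrary source position. A Position is valid if its
// line number is > 0.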
type Position struct {
Filename string // filename, if any
Offset int // offset, starting at 0
Line int // line number, starting at 1
Column int // column number, starting at 1 (byte count)
}
func (p Position) IsValid() bool {
return p.Line > 0
}
func (p Position) String() string {
s := p.Filename
if p.IsValid() {
if s != "" {
s += ":"
}
s += fmt.Sprintf("%d", p.Line)
if p.Column != 0 {
s += fmt.Sprintf(":%d", p.Column)
}
}
if s == "" {
s = "-"
}
return s
}
package cache
import (
"context"
"fmt"
"math/rand"
"sync"
"go.lsp.dev/uri"
)
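// Session manages the views and the overlay file system of a single LSP session.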
type Session struct {
id int64
// initialized indicates whether the session has been initialized with the initialize params.
// If the initialize params don't contain any folder, this stays false.
initializedMu sync.Mutex
initialized bool
// cache is shared global
cache *Cache
viewMu sync.Mutex
views []*View
viewMap map[uri.URI]*View // map of URI->best view
// Session holds the overlayFS to manage file contents.
// View and Snapshot only hold a FileSource to read from the overlayFS.
*overlayFS
}
func NewSession(cache *Cache) *Session {
sess := &Session{
id: rand.Int63(),
cache: cache,
views: make([]*View, 0),
viewMap: make(map[uri.URI]*View),
overlayFS: NewOverlayFS(cache),
}
return sess
}
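// Initialize runs fn exactly once and marks the session as initialized.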
func (s *Session) Initialize(fn func()) {
s.initializedMu.Lock()
defer s.initializedMu.Unlock()
if s.initialized {
return
}
s.initialized = true
fn()
}
func (s *Session) CreateView(folder uri.URI) {
view := NewView(folder.Filename(), folder, s.overlayFS, s.cache.store)
s.views = append(s.views, view)
}
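// ViewOf returns the view responsible for fileURI: a cached mapping if present,
// otherwise the first view whose folder contains the file, then any view that
// already knows the file, and finally the first view as a fallback.
//
// Typical usage (sketch, mirroring the LSP handlers):
//
//	view, err := session.ViewOf(fileURI)
//	if err != nil {
//		return nil, err
//	}
//	ss, release := view.Snapshot()
//	defer release()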
func (s *Session) ViewOf(fileURI uri.URI) (*View, error) {
s.viewMu.Lock()
defer s.viewMu.Unlock()
if view, ok := s.viewMap[fileURI]; ok {
return view, nil
}
if len(s.views) == 0 {
return nil, fmt.Errorf("views is nil")
}
for i := range s.views {
if s.views[i].ContainsFile(fileURI) {
s.viewMap[fileURI] = s.views[i]
return s.views[i], nil
}
}
for i := range s.views {
if s.views[i].FileKnown(fileURI) {
s.viewMap[fileURI] = s.views[i]
return s.views[i], nil
}
}
return s.views[0], nil
}
func (s *Session) UpdateOverlayFS(ctx context.Context, changes []*FileChange) error {
return s.overlayFS.Update(ctx, changes)
}
package cache
import (
"context"
"math/rand"
"sync"
"github.com/joyme123/thrift-ls/lsp/memoize"
log "github.com/sirupsen/logrus"
"go.lsp.dev/uri"
)
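// Snapshot is a point-in-time state of a view: the files it has read, their parse
// results and the include graph. Callers must hold a reference obtained from
// Acquire while using it.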
type Snapshot struct {
id int64
view *View
// ctx is used to cancel background job
ctx context.Context
refCount sync.WaitGroup
files *FilesMap
store *memoize.Store
graph *IncludeGraph
parsedCache *ParseCaches
}
func NewSnapshot(view *View, store *memoize.Store) *Snapshot {
snapshot := &Snapshot{
id: rand.Int63(),
view: view,
store: store,
ctx: context.Background(),
refCount: sync.WaitGroup{},
graph: NewIncludeGraph(),
parsedCache: NewParseCaches(),
files: &FilesMap{
files: make(map[uri.URI]FileHandle),
overlays: make(map[uri.URI]*Overlay),
},
}
return snapshot
}
func (s *Snapshot) Acquire() func() {
s.refCount.Add(1)
return s.refCount.Done
}
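// Initialize runs background initialization for the snapshot; currently a no-op.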
func (s *Snapshot) Initialize(ctx context.Context) {
}
func (s *Snapshot) Graph() *IncludeGraph {
return s.graph
}
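// ReadFile returns the handle for uri, reading through the view's file source
// and caching the handle in the snapshot.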
func (s *Snapshot) ReadFile(ctx context.Context, uri uri.URI) (FileHandle, error) {
log.Debugln("snapshot read file", uri)
s.view.MarkFileKnown(uri)
if fh, ok := s.files.Get(uri); ok {
return fh, nil
}
log.Debugln("snapshot read from fs")
fh, err := s.view.fs.ReadFile(ctx, uri)
if err != nil {
return nil, err
}
s.files.Set(uri, fh)
return fh, nil
}
// ForgetFile is called when a file is changed or removed.
// It drops the file's cached handle, include-graph entry and parse result.
func (s *Snapshot) ForgetFile(uri uri.URI) {
s.files.Forget(uri)
s.graph.Remove(uri)
s.parsedCache.Forget(uri)
}
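// Parse returns the cached ParsedFile for uri, parsing the file and recording its
// include edges on first use.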
func (s *Snapshot) Parse(ctx context.Context, uri uri.URI) (*ParsedFile, error) {
if parsedFile := s.parsedCache.Get(uri); parsedFile != nil {
return parsedFile, nil
}
fh, err := s.ReadFile(ctx, uri)
if err != nil {
return nil, err
}
// DEBUG
// content, _ := fh.Content()
// log.Debugln("parse content:", string(content))
pf, err := Parse(fh)
if err != nil {
log.Debugf("snapshot parse err: %v", err)
return nil, err
}
if pf.AST() != nil {
s.graph.Set(uri, pf.AST().Includes)
}
s.parsedCache.Set(uri, pf)
return pf, nil
}
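// Tokens returns the identifiers collected from all parsed files; it backs
// token-based completion.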
func (s *Snapshot) Tokens() map[string]struct{} {
return s.parsedCache.Tokens()
}
func (s *Snapshot) clone() (*Snapshot, func()) {
snap := &Snapshot{
id: rand.Int63(),
view: s.view,
ctx: context.Background(),
// TODO(jpf): file changes are not propagated here, which can lead to reading stale cache entries
files: s.files.Clone(),
// files: &FilesMap{
// files: make(map[uri.URI]FileHandle),
// overlays: make(map[uri.URI]*Overlay),
// },
graph: s.graph.Clone(),
parsedCache: s.parsedCache.Clone(),
}
return snap, snap.Acquire()
}
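// BuildSnapshotForTest builds a snapshot pre-populated with the given file
// changes; it is intended for tests only.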
func BuildSnapshotForTest(files []*FileChange) *Snapshot {
store := &memoize.Store{}
c := New(store)
fs := NewOverlayFS(c)
fs.Update(context.TODO(), files)
view := NewView("test", "file:///tmp", fs, store)
ss := NewSnapshot(view, store)
for _, f := range files {
ss.Parse(context.TODO(), f.URI)
}
return ss
}
package cache
import (
"context"
"math/rand"
"strings"
"sync"
"github.com/joyme123/thrift-ls/lsp/memoize"
log "github.com/sirupsen/logrus"
"go.lsp.dev/uri"
)
type View struct {
id int64
// name is the user-specified name of this view.
name string
// TODO(jpf): the current design of View is not ideal and needs rework
// workspace folder
folder uri.URI
fs FileSource
knownFilesMu sync.Mutex
knownFiles map[uri.URI]bool
// Track the latest snapshot via the snapshot field, guarded by snapshotMu.
//
// Invariant: whenever the snapshot field is overwritten, destroy(snapshot)
// is called on the previous (overwritten) snapshot while snapshotMu is held,
// incrementing snapshotWG. During shutdown the final snapshot is
// overwritten with nil and destroyed, guaranteeing that all observed
// snapshots have been destroyed via the destroy method, and snapshotWG may
// be waited upon to let these destroy operations complete.
snapshotMu sync.Mutex
snapshot *Snapshot // latest snapshot; nil after shutdown has been called
snapshotRelease func()
}
func NewView(name string, folder uri.URI, fs FileSource, store *memoize.Store) *View {
view := &View{
id: rand.Int63(),
name: name,
folder: folder,
fs: fs,
knownFiles: make(map[uri.URI]bool),
}
view.snapshot = NewSnapshot(view, store)
view.snapshotRelease = view.snapshot.Acquire()
asyncRelease := view.snapshot.Acquire()
go func() {
defer asyncRelease()
view.snapshotMu.Lock()
view.snapshot.Initialize(context.Background())
view.snapshotMu.Unlock()
}()
return view
}
func (v *View) ContainsFile(uri uri.URI) bool {
// folder: file:///workdir/
// file: file:///workdir/file.idl
folder := v.folder.Filename()
file := uri.Filename()
if !strings.HasPrefix(file, folder) {
return false
}
folder = strings.TrimSuffix(folder, "/")
file = strings.TrimPrefix(file, folder)
if strings.HasPrefix(file, "/") {
return true
}
return false
}
func (v *View) MarkFileKnown(fileURI uri.URI) {
v.knownFilesMu.Lock()
defer v.knownFilesMu.Unlock()
if v.knownFiles == nil {
v.knownFiles = make(map[uri.URI]bool)
}
v.knownFiles[fileURI] = true
}
func (v *View) FileKnown(uri uri.URI) bool {
v.knownFilesMu.Lock()
defer v.knownFilesMu.Unlock()
return v.knownFiles[uri]
}
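// FileChange applies file changes to the view: it clones the current snapshot,
// drops cached state for the changed files, re-parses them and then runs postFns.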
func (v *View) FileChange(ctx context.Context, changes []*FileChange, postFns ...func()) {
for _, change := range changes {
v.MarkFileKnown(change.URI)
}
// snapshot clone
newSnapshot, release := v.snapshot.clone()
// release previous snapshot
v.snapshotRelease()
v.snapshotMu.Lock()
v.snapshot = newSnapshot
for _, change := range changes {
v.snapshot.ForgetFile(change.URI)
}
v.snapshotMu.Unlock()
v.snapshotRelease = release
asyncRelease := v.snapshot.Acquire()
// handle current snapshot
// TODO(jpf): ordering issue between asynchronous parse and completion
// go func() {
defer asyncRelease()
uris := make(map[uri.URI]struct{})
for _, change := range changes {
uris[change.URI] = struct{}{}
}
for uri := range uris {
v.snapshotMu.Lock()
_, err := v.snapshot.Parse(ctx, uri)
v.snapshotMu.Unlock()
if err != nil {
log.Errorf("parse error: %v", err)
}
}
for i := range postFns {
postFns[i]()
}
// }()
return
}
func (v *View) Snapshot() (*Snapshot, func()) {
v.snapshotMu.Lock()
defer v.snapshotMu.Unlock()
if v.snapshot == nil {
// snapshot is only nil after shutdown (see the invariant on View); fail loudly instead of a nil dereference
panic("Snapshot called after view shutdown")
}
return v.snapshot, v.snapshot.Acquire()
}
package lsp
import (
"context"
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/lsp/codejump"
)
func (s *Server) definition(ctx context.Context, params *protocol.DefinitionParams) (result []protocol.Location, err error) {
file := params.TextDocument.URI
view, err := s.session.ViewOf(file)
if err != nil {
return nil, err
}
ss, release := view.Snapshot()
defer release()
return codejump.Definition(ctx, ss, params.TextDocument.URI, params.Position)
}
func (s *Server) references(ctx context.Context, params *protocol.ReferenceParams) (result []protocol.Location, err error) {
file := params.TextDocument.URI
view, err := s.session.ViewOf(file)
if err != nil {
return nil, err
}
ss, release := view.Snapshot()
defer release()
return codejump.Reference(ctx, ss, params.TextDocument.URI, params.Position)
}
func (s *Server) typeDefinition(ctx context.Context, params *protocol.TypeDefinitionParams) (result []protocol.Location, err error) {
file := params.TextDocument.URI
view, err := s.session.ViewOf(file)
if err != nil {
return nil, err
}
ss, release := view.Snapshot()
defer release()
return codejump.TypeDefinition(ctx, ss, params.TextDocument.URI, params.Position)
}
package codejump
import (
"context"
"errors"
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/lsp/cache"
"github.com/joyme123/thrift-ls/lsp/lsputils"
"github.com/joyme123/thrift-ls/lsp/types"
"github.com/joyme123/thrift-ls/parser"
log "github.com/sirupsen/logrus"
"go.lsp.dev/uri"
)
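// Definition resolves go-to-definition for the symbol at pos in file and returns
// the location(s) of its definition.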
func Definition(ctx context.Context, ss *cache.Snapshot, file uri.URI, pos protocol.Position) (res []protocol.Location, err error) {
res = make([]protocol.Location, 0)
pf, err := ss.Parse(ctx, file)
if err != nil {
return
}
if pf.AST() == nil {
err = errors.New("parse ast failed")
return
}
astPos, err := pf.Mapper().LSPPosToParserPosition(types.Position{Line: pos.Line, Character: pos.Character})
if err != nil {
return
}
nodePath := parser.SearchNodePathByPosition(pf.AST(), astPos)
targetNode := nodePath[len(nodePath)-1]
switch targetNode.Type() {
case "TypeName":
return typeNameDefinition(ctx, ss, file, pf.AST(), targetNode)
case "ConstValue":
return constValueTypeDefinition(ctx, ss, file, pf.AST(), targetNode)
case "IdentifierName": // service extends
return serviceDefinition(ctx, ss, file, pf.AST(), targetNode)
}
return
}
func serviceDefinition(ctx context.Context, ss *cache.Snapshot, file uri.URI, ast *parser.Document, targetNode parser.Node) ([]protocol.Location, error) {
res := make([]protocol.Location, 0)
astFile, id, _, err := ServiceDefinitionIdentifier(ctx, ss, file, ast, targetNode)
if err != nil {
return res, err
}
if id != nil {
res = append(res, jump(astFile, id.Name))
}
return res, nil
}
func ServiceDefinitionIdentifier(ctx context.Context, ss *cache.Snapshot, file uri.URI, ast *parser.Document, targetNode parser.Node) (uri.URI, *parser.Identifier, string, error) {
identifierName := targetNode.(*parser.IdentifierName)
include, identifier := lsputils.ParseIdent(file, ast.Includes, identifierName.Text)
var astFile uri.URI
if include == "" {
astFile = file
} else {
path := lsputils.GetIncludePath(ast, include)
if path == "" { // doesn't match any include path
return "", nil, "", nil
}
astFile = lsputils.IncludeURI(file, path)
}
// now we can find the target definition in `dstAst` by `identifier`
dstAst, err := ss.Parse(ctx, astFile)
if err != nil {
return astFile, nil, "", err
}
if len(dstAst.Errors()) > 0 {
log.Errorf("parse error: %v", dstAst.Errors())
}
dstService := GetServiceNode(dstAst.AST(), identifier)
if dstService != nil {
return astFile, dstService.Name, "Service", nil
}
return astFile, nil, "", nil
}
func typeNameDefinition(ctx context.Context, ss *cache.Snapshot, file uri.URI, ast *parser.Document, targetNode parser.Node) ([]protocol.Location, error) {
res := make([]protocol.Location, 0)
astFile, id, _, err := TypeNameDefinitionIdentifier(ctx, ss, file, ast, targetNode)
if err != nil {
return res, err
}
if id != nil {
res = append(res, jump(astFile, id.Name))
}
return res, nil
}
func TypeNameDefinitionIdentifier(ctx context.Context, ss *cache.Snapshot, file uri.URI, ast *parser.Document, targetNode parser.Node) (uri.URI, *parser.Identifier, string, error) {
typeName := targetNode.(*parser.TypeName)
typeV := typeName.Name
if IsBasicType(typeV) {
return "", nil, "", nil
}
include, identifier := lsputils.ParseIdent(file, ast.Includes, typeV)
var astFile uri.URI
if include == "" {
astFile = file
} else {
path := lsputils.GetIncludePath(ast, include)
if path == "" { // doesn't match any include path
return "", nil, "", nil
}
astFile = lsputils.IncludeURI(file, path)
}
// now we can find the target definition in `dstAst` by `identifier`
dstAst, err := ss.Parse(ctx, astFile)
if err != nil {
return astFile, nil, "", err
}
if len(dstAst.Errors()) > 0 {
log.Errorf("parse error: %v", dstAst.Errors())
}
// struct, exception, enum or union
dstException := GetExceptionNode(dstAst.AST(), identifier)
if dstException != nil {
return astFile, dstException.Name, "Exception", nil
}
dstStruct := GetStructNode(dstAst.AST(), identifier)
if dstStruct != nil {
return astFile, dstStruct.Identifier, "Struct", nil
}
dstEnum := GetEnumNode(dstAst.AST(), identifier)
if dstEnum != nil {
return astFile, dstEnum.Name, "Enum", nil
}
dstUnion := GetUnionNode(dstAst.AST(), identifier)
if dstUnion != nil {
return astFile, dstUnion.Name, "Union", nil
}
dstTypedef := GetTypedefNode(dstAst.AST(), identifier)
if dstTypedef != nil {
return astFile, dstTypedef.Alias, "Typedef", nil
}
return astFile, nil, "", nil
}
// constValueTypeDefinition resolves a const value identifier to the enum value or const that defines it
func constValueTypeDefinition(ctx context.Context, ss *cache.Snapshot, file uri.URI, ast *parser.Document, targetNode parser.Node) ([]protocol.Location, error) {
res := make([]protocol.Location, 0)
astFile, id, err := ConstValueTypeDefinitionIdentifier(ctx, ss, file, ast, targetNode)
if err != nil {
return res, err
}
if id != nil {
res = append(res, jump(astFile, id))
}
return res, nil
}
func ConstValueTypeDefinitionIdentifier(ctx context.Context, ss *cache.Snapshot, file uri.URI, ast *parser.Document, targetNode parser.Node) (uri.URI, *parser.Identifier, error) {
constValue := targetNode.(*parser.ConstValue)
if constValue.TypeName != "identifier" {
return "", nil, nil
}
include, identifier := lsputils.ParseIdent(file, ast.Includes, constValue.Value.(string))
var astFile uri.URI
if include == "" {
astFile = file
} else {
path := lsputils.GetIncludePath(ast, include)
if path == "" { // doesn't match any include path, maybe enum value
include = ""
identifier = constValue.Value.(string)
astFile = file
} else {
astFile = lsputils.IncludeURI(file, path)
}
}
// now we can find the target definition in `dstAst` by `identifier`
dstAst, err := ss.Parse(ctx, astFile)
if err != nil {
return astFile, nil, err
}
dstEnumValueIdentifier := GetEnumValueIdentifierNode(dstAst.AST(), identifier)
if dstEnumValueIdentifier != nil {
return astFile, dstEnumValueIdentifier, nil
}
constIdentifier := GetConstIdentifierNode(dstAst.AST(), identifier)
if constIdentifier != nil {
return astFile, constIdentifier, nil
}
return astFile, nil, nil
}
package codejump
import (
"context"
"errors"
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/format"
"github.com/joyme123/thrift-ls/lsp/cache"
"github.com/joyme123/thrift-ls/lsp/lsputils"
"github.com/joyme123/thrift-ls/lsp/types"
"github.com/joyme123/thrift-ls/parser"
log "github.com/sirupsen/logrus"
"go.lsp.dev/uri"
)
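// Hover returns the formatted definition of the symbol at pos for display as
// hover content.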
func Hover(ctx context.Context, ss *cache.Snapshot, file uri.URI, pos protocol.Position) (res string, err error) {
pf, err := ss.Parse(ctx, file)
if err != nil {
return
}
if pf.AST() == nil {
err = errors.New("parse ast failed")
return
}
astPos, err := pf.Mapper().LSPPosToParserPosition(types.Position{Line: pos.Line, Character: pos.Character})
if err != nil {
return
}
nodePath := parser.SearchNodePathByPosition(pf.AST(), astPos)
targetNode := nodePath[len(nodePath)-1]
log.Info("node type:", targetNode.Type())
switch targetNode.Type() {
case "TypeName":
return hoverDefinition(ctx, ss, file, pf.AST(), targetNode)
case "ConstValue":
return hoverConstValue(ctx, ss, file, pf.AST(), targetNode)
case "IdentifierName": // service extends
return hoverService(ctx, ss, file, pf.AST(), targetNode)
}
return
}
func hoverService(ctx context.Context, ss *cache.Snapshot, file uri.URI, ast *parser.Document, targetNode parser.Node) (string, error) {
identifierName := targetNode.(*parser.IdentifierName)
name := identifierName.Text
include, identifier := lsputils.ParseIdent(file, ast.Includes, name)
var astFile uri.URI
if include == "" {
astFile = file
} else {
path := lsputils.GetIncludePath(ast, include)
if path == "" { // doesn't match any include path
return "", nil
}
astFile = lsputils.IncludeURI(file, path)
}
// now we can find the target definition in `dstAst` by `identifier`
dstAst, err := ss.Parse(ctx, astFile)
if err != nil {
return "", err
}
if len(dstAst.Errors()) > 0 {
log.Errorf("parse error: %v", dstAst.Errors())
}
dstService := GetServiceNode(dstAst.AST(), identifier)
if dstService != nil {
return format.MustFormatService(dstService), nil
}
return "", nil
}
func hoverDefinition(ctx context.Context, ss *cache.Snapshot, file uri.URI, ast *parser.Document, targetNode parser.Node) (string, error) {
typeName := targetNode.(*parser.TypeName)
typeV := typeName.Name
if IsBasicType(typeV) {
return "", nil
}
include, identifier := lsputils.ParseIdent(file, ast.Includes, typeV)
var astFile uri.URI
if include == "" {
astFile = file
} else {
path := lsputils.GetIncludePath(ast, include)
if path == "" { // doesn't match any include path
return "", nil
}
astFile = lsputils.IncludeURI(file, path)
}
// now we can find the target definition in `dstAst` by `identifier`
dstAst, err := ss.Parse(ctx, astFile)
if err != nil {
return "", err
}
if len(dstAst.Errors()) > 0 {
log.Errorf("parse error: %v", dstAst.Errors())
}
// struct, exception, enum or union
dstException := GetExceptionNode(dstAst.AST(), identifier)
if dstException != nil {
return format.MustFormatException(dstException), nil
}
dstStruct := GetStructNode(dstAst.AST(), identifier)
if dstStruct != nil {
return format.MustFormatStruct(dstStruct), nil
}
dstEnum := GetEnumNode(dstAst.AST(), identifier)
if dstEnum != nil {
return format.MustFormatEnum(dstEnum), nil
}
dstUnion := GetUnionNode(dstAst.AST(), identifier)
if dstUnion != nil {
return format.MustFormatUnion(dstUnion), nil
}
dstTypedef := GetTypedefNode(dstAst.AST(), identifier)
if dstTypedef != nil {
return format.MustFormatTypedef(dstTypedef), nil
}
return "", nil
}
func hoverConstValue(ctx context.Context, ss *cache.Snapshot, file uri.URI, ast *parser.Document, targetNode parser.Node) (string, error) {
constValue := targetNode.(*parser.ConstValue)
if constValue.TypeName != "identifier" {
return "", nil
}
include, identifier := lsputils.ParseIdent(file, ast.Includes, constValue.Value.(string))
var astFile uri.URI
if include == "" {
astFile = file
} else {
path := lsputils.GetIncludePath(ast, include)
if path == "" { // doesn't match any include path, maybe enum value
include = ""
identifier = constValue.Value.(string)
astFile = file
} else {
astFile = lsputils.IncludeURI(file, path)
}
}
// now we can find the target definition in `dstAst` by `identifier`
dstAst, err := ss.Parse(ctx, astFile)
if err != nil {
return "", err
}
dstEnum := GetEnumNodeByEnumValue(dstAst.AST(), identifier)
if dstEnum != nil {
return format.MustFormatEnum(dstEnum), nil
}
dstConst := GetConstNode(dstAst.AST(), identifier)
if dstConst != nil {
return format.MustFormatConst(dstConst), nil
}
return "", nil
}
package codejump
import (
"context"
"errors"
"fmt"
"strings"
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/lsp/cache"
"github.com/joyme123/thrift-ls/lsp/lsputils"
"github.com/joyme123/thrift-ls/lsp/types"
"github.com/joyme123/thrift-ls/parser"
utilerrors "github.com/joyme123/thrift-ls/utils/errors"
log "github.com/sirupsen/logrus"
"go.lsp.dev/uri"
)
var validReferenceDefinitionType = map[string]struct{}{
"Struct": {},
"Union": {},
"Enum": {},
"Exception": {},
"Typedef": {},
}
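// Reference finds all references to the symbol at pos, searching the current file
// and every file that includes it.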
func Reference(ctx context.Context, ss *cache.Snapshot, file uri.URI, pos protocol.Position) (res []protocol.Location, err error) {
res = make([]protocol.Location, 0)
pf, err := ss.Parse(ctx, file)
if err != nil {
return
}
if pf.AST() == nil {
err = errors.New("parse ast failed")
return
}
astPos, err := pf.Mapper().LSPPosToParserPosition(types.Position{Line: pos.Line, Character: pos.Character})
if err != nil {
return
}
nodePath := parser.SearchNodePathByPosition(pf.AST(), astPos)
targetNode := nodePath[len(nodePath)-1]
switch targetNode.Type() {
case "TypeName":
return searchTypeNameReferences(ctx, ss, file, pf.AST(), nodePath, targetNode)
case "IdentifierName":
if len(nodePath) <= 2 {
return
}
// identifierName -> identifier -> definition
parentDefinitionNode := nodePath[len(nodePath)-3]
definitionType := parentDefinitionNode.Type()
if definitionType == "EnumValue" || definitionType == "Const" {
var typeName string
if definitionType == "Const" {
typeName = fmt.Sprintf("%s.%s", lsputils.GetIncludeName(file), targetNode.(*parser.IdentifierName).Text)
} else {
enumNode := nodePath[len(nodePath)-4]
typeName = fmt.Sprintf("%s.%s.%s", lsputils.GetIncludeName(file), enumNode.(*parser.Enum).Name.Name.Text, targetNode.(*parser.IdentifierName).Text)
}
// search in const value
return searchConstValueIdentifierReferences(ctx, ss, file, typeName)
} else if definitionType == "Service" {
svcName := targetNode.(*parser.IdentifierName).Text
if !strings.Contains(svcName, ".") {
svcName = fmt.Sprintf("%s.%s", lsputils.GetIncludeName(file), svcName)
} else {
include, _ := lsputils.ParseIdent(file, pf.AST().Includes, svcName)
path := lsputils.GetIncludePath(pf.AST(), include)
if path != "" { // doesn't match any include path
file = lsputils.IncludeURI(file, path)
}
}
return searchServiceReferences(ctx, ss, file, svcName)
}
if _, ok := validReferenceDefinitionType[definitionType]; !ok {
return
}
// typeName is fully qualified, e.g. base.User
typeName := fmt.Sprintf("%s.%s", lsputils.GetIncludeName(file), targetNode.(*parser.IdentifierName).Text)
return searchIdentifierReferences(ctx, ss, file, typeName, definitionType)
case "ConstValue":
return searchConstValueReferences(ctx, ss, file, pf.AST(), nodePath, targetNode)
default:
log.Warningln("unsupport type for reference:", targetNode.Type())
}
return
}
func searchTypeNameReferences(ctx context.Context, ss *cache.Snapshot, file uri.URI, ast *parser.Document, nodePath []parser.Node, targetNode parser.Node) (res []protocol.Location, err error) {
res = make([]protocol.Location, 0)
typeNameNode := targetNode.(*parser.TypeName)
typeName := typeNameNode.Name
if IsBasicType(typeName) {
return
}
var errs []error
// search type definition
definitionFile, identifierNode, definitionType, err := TypeNameDefinitionIdentifier(ctx, ss, file, ast, targetNode)
if err != nil {
errs = append(errs, err)
}
if identifierNode == nil {
return
}
res = append(res, jump(definitionFile, identifierNode.Name))
locations, err := searchIdentifierReferences(ctx, ss, definitionFile, typeName, definitionType)
if err != nil {
errs = append(errs, err)
}
res = append(res, locations...)
if len(errs) > 0 {
err = utilerrors.NewAggregate(errs)
}
return
}
func searchServiceReferences(ctx context.Context, ss *cache.Snapshot, file uri.URI, svcName string) (res []protocol.Location, err error) {
log.Debugln("searchServiceReferences for file:", file, "svcName:", svcName)
var errs []error
// search in the file itself
locations, err := searchServiceDefinitionReferences(ctx, ss, file, strings.TrimPrefix(svcName, fmt.Sprintf("%s.", lsputils.GetIncludeName(file))))
if err != nil {
errs = append(errs, err)
}
res = append(res, locations...)
// search service references in other files
includeNode := ss.Graph().Get(file)
if includeNode != nil {
if len(includeNode.InDegree()) == 0 && len(includeNode.OutDegree()) == 0 {
ss.Graph().Debug()
}
referenceFiles := includeNode.InDegree()
for _, referenceFile := range referenceFiles {
log.Debugln("reference file: ", referenceFile)
locations, err := searchServiceDefinitionReferences(ctx, ss, referenceFile, svcName)
if err != nil {
errs = append(errs, err)
}
res = append(res, locations...)
}
}
if len(errs) > 0 {
err = utilerrors.NewAggregate(errs)
}
return
}
func searchServiceDefinitionReferences(ctx context.Context, ss *cache.Snapshot, file uri.URI, svcName string) (res []protocol.Location, err error) {
ast, err := ss.Parse(ctx, file)
if err != nil {
return
}
if ast.AST() == nil {
return
}
for _, svc := range ast.AST().Services {
if svc.BadNode || svc.ChildrenBadNode() || svc.Extends == nil || svc.Extends.Name == nil {
continue
}
if svcName == svc.Extends.Name.Text {
res = append(res, jump(file, svc.Extends.Name))
}
}
return
}
func searchIdentifierReferences(ctx context.Context, ss *cache.Snapshot, file uri.URI, typeName string, definitionType string) (res []protocol.Location, err error) {
log.Debugln("searchIdentifierReferences for file:", file, "typeName:", typeName)
var errs []error
// search in the file itself
locations, err := searchDefinitionIdentifierReferences(ctx, ss, file,
strings.TrimPrefix(typeName, fmt.Sprintf("%s.", lsputils.GetIncludeName(file))), definitionType)
if err != nil {
errs = append(errs, err)
}
res = append(res, locations...)
// search type references in other files
includeNode := ss.Graph().Get(file)
log.Debugln("includeNode: ", includeNode)
if includeNode != nil {
if len(includeNode.InDegree()) == 0 && len(includeNode.OutDegree()) == 0 {
ss.Graph().Debug()
}
referenceFiles := includeNode.InDegree()
for _, referenceFile := range referenceFiles {
log.Debugln("reference file: ", referenceFile)
locations, err := searchDefinitionIdentifierReferences(ctx, ss, referenceFile, typeName, definitionType)
if err != nil {
errs = append(errs, err)
}
res = append(res, locations...)
}
}
if len(errs) > 0 {
err = utilerrors.NewAggregate(errs)
}
return
}
func searchDefinitionIdentifierReferences(ctx context.Context, ss *cache.Snapshot, file uri.URI, typeName string, definitionType string) (res []protocol.Location, err error) {
ast, err := ss.Parse(ctx, file)
if err != nil {
return
}
if ast.AST() == nil {
return
}
var searchFieldType func(fieldType *parser.FieldType)
searchFieldType = func(fieldType *parser.FieldType) {
if fieldType.KeyType != nil && !fieldType.KeyType.BadNode {
searchFieldType(fieldType.KeyType)
}
if fieldType.ValueType != nil && !fieldType.ValueType.BadNode {
searchFieldType(fieldType.ValueType)
}
if fieldType.TypeName != nil && fieldType.TypeName.Name == typeName {
res = append(res, jump(file, fieldType.TypeName))
}
}
jumpField := func(field *parser.Field) {
if field.BadNode || field.FieldType == nil || field.FieldType.BadNode || field.FieldType.TypeName == nil {
return
}
searchFieldType(field.FieldType)
}
for _, svc := range ast.AST().Services {
for _, fn := range svc.Functions {
if fn.BadNode {
continue
}
if fn.FunctionType != nil && !fn.FunctionType.BadNode {
searchFieldType(fn.FunctionType)
}
for i := range fn.Arguments {
log.Debugln("search function args", "definitionType", definitionType, "typeName", typeName)
jumpField(fn.Arguments[i])
}
if fn.Throws != nil {
for i := range fn.Throws.Fields {
jumpField(fn.Throws.Fields[i])
}
}
}
}
if definitionType == "Exception" {
return
}
for _, st := range ast.AST().Structs {
if st.BadNode {
continue
}
for _, field := range st.Fields {
jumpField(field)
}
}
for _, st := range ast.AST().Unions {
if st.BadNode {
continue
}
for _, field := range st.Fields {
jumpField(field)
}
}
for _, st := range ast.AST().Exceptions {
if st.BadNode {
continue
}
for _, field := range st.Fields {
jumpField(field)
}
}
for _, typedef := range ast.AST().Typedefs {
if typedef.BadNode || typedef.T == nil || typedef.T.BadNode {
continue
}
searchFieldType(typedef.T)
}
for _, cst := range ast.AST().Consts {
if cst.BadNode || cst.ConstType == nil || cst.ConstType.BadNode {
continue
}
searchFieldType(cst.ConstType)
}
return res, nil
}
func searchConstValueReferences(ctx context.Context, ss *cache.Snapshot, file uri.URI, ast *parser.Document, nodePath []parser.Node, targetNode parser.Node) (res []protocol.Location, err error) {
res = make([]protocol.Location, 0)
var errs []error
// search type definition
definitionFile, identifierNode, err := ConstValueTypeDefinitionIdentifier(ctx, ss, file, ast, targetNode)
if err != nil {
errs = append(errs, err)
}
if identifierNode == nil {
return
}
res = append(res, jump(definitionFile, identifierNode.Name))
valueName := targetNode.(*parser.ConstValue).Value.(string)
locations, err := searchConstValueIdentifierReferences(ctx, ss, definitionFile, valueName)
if err != nil {
errs = append(errs, err)
}
res = append(res, locations...)
if len(errs) > 0 {
err = utilerrors.NewAggregate(errs)
}
return
}
// a const value may reference a const definition or an enum value definition
func searchConstValueIdentifierReferences(ctx context.Context, ss *cache.Snapshot, file uri.URI, valueName string) (res []protocol.Location, err error) {
var errs []error
// search in the file itself
locations, err := searchConstValueIdentifierReference(ctx, ss, file, strings.TrimPrefix(valueName, fmt.Sprintf("%s.", lsputils.GetIncludeName(file))))
if err != nil {
errs = append(errs, err)
}
res = append(res, locations...)
// search type references in other files
includeNode := ss.Graph().Get(file)
if includeNode != nil {
referenceFiles := includeNode.InDegree()
for _, referenceFile := range referenceFiles {
locations, err := searchConstValueIdentifierReference(ctx, ss, referenceFile, valueName)
if err != nil {
errs = append(errs, err)
}
res = append(res, locations...)
}
}
if len(errs) > 0 {
err = utilerrors.NewAggregate(errs)
}
return
}
func searchConstValueIdentifierReference(ctx context.Context, ss *cache.Snapshot, file uri.URI, valueName string) (res []protocol.Location, err error) {
ast, err := ss.Parse(ctx, file)
if err != nil {
return
}
if ast.AST() == nil {
return
}
jumpField := func(field *parser.Field) {
if field.BadNode || field.ConstValue == nil || field.ConstValue.TypeName != "identifier" {
return
}
if field.ConstValue.Value == valueName {
res = append(res, jump(file, field.ConstValue))
}
}
for _, st := range ast.AST().Structs {
if st.BadNode {
continue
}
for _, field := range st.Fields {
jumpField(field)
}
}
for _, union := range ast.AST().Unions {
if union.BadNode {
continue
}
for _, field := range union.Fields {
jumpField(field)
}
}
for _, excep := range ast.AST().Exceptions {
if excep.BadNode {
continue
}
for _, field := range excep.Fields {
jumpField(field)
}
}
for _, cst := range ast.AST().Consts {
if cst.BadNode || cst.Value == nil || cst.Value.TypeName != "identifier" {
continue
}
if cst.Value.Value == valueName {
res = append(res, jump(file, cst.Value))
}
}
for _, enum := range ast.AST().Enums {
if enum.BadNode {
continue
}
for _, enumValue := range enum.Values {
if enumValue.ValueNode == nil || enumValue.ValueNode.TypeName != "identifier" {
continue
}
if enumValue.ValueNode.Value == valueName {
res = append(res, jump(file, enumValue.ValueNode))
}
}
}
for _, svc := range ast.AST().Services {
for _, fn := range svc.Functions {
for _, field := range fn.Arguments {
jumpField(field)
}
if fn.Throws != nil {
for _, field := range fn.Throws.Fields {
jumpField(field)
}
}
}
}
return
}
package codejump
import (
"context"
"errors"
"fmt"
"strings"
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/lsp/cache"
"github.com/joyme123/thrift-ls/lsp/lsputils"
"github.com/joyme123/thrift-ls/lsp/types"
"github.com/joyme123/thrift-ls/parser"
"go.lsp.dev/uri"
)
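// PrepareRename returns the range of the symbol at pos if it can be renamed;
// only IdentifierName and ConstValue nodes are supported.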
func PrepareRename(ctx context.Context, ss *cache.Snapshot, file uri.URI, pos protocol.Position) (res *protocol.Range, err error) {
pf, err := ss.Parse(ctx, file)
if err != nil {
return
}
if pf.AST() == nil {
err = errors.New("parse ast failed")
return
}
astPos, err := pf.Mapper().LSPPosToParserPosition(types.Position{Line: pos.Line, Character: pos.Character})
if err != nil {
return
}
nodePath := parser.SearchNodePathByPosition(pf.AST(), astPos)
targetNode := nodePath[len(nodePath)-1]
switch targetNode.Type() {
case "IdentifierName", "ConstValue":
rg := lsputils.ASTNodeToRange(targetNode)
return &rg, nil
default:
err = fmt.Errorf("%s doesn't support rename", targetNode.Type())
return
}
}
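// Rename computes a WorkspaceEdit that renames the symbol at pos to newName,
// updating the definition and all references; edits in other files keep the
// include-name prefix.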
func Rename(ctx context.Context, ss *cache.Snapshot, file uri.URI, pos protocol.Position, newName string) (res *protocol.WorkspaceEdit, err error) {
pf, err := ss.Parse(ctx, file)
if err != nil {
return
}
if pf.AST() == nil {
err = errors.New("parse ast failed")
return
}
astPos, err := pf.Mapper().LSPPosToParserPosition(types.Position{Line: pos.Line, Character: pos.Character})
if err != nil {
return
}
nodePath := parser.SearchNodePathByPosition(pf.AST(), astPos)
targetNode := nodePath[len(nodePath)-1]
self := lsputils.ASTNodeToRange(targetNode)
switch targetNode.Type() {
case "IdentifierName":
if len(nodePath) <= 2 {
return
}
// identifierName -> identifier -> definition
parentDefinitionNode := nodePath[len(nodePath)-3]
definitionType := parentDefinitionNode.Type()
if definitionType == "EnumValue" || definitionType == "Const" {
var typeName string
if definitionType == "Const" {
typeName = fmt.Sprintf("%s.%s", lsputils.GetIncludeName(file), targetNode.(*parser.IdentifierName).Text)
} else {
enumNode := nodePath[len(nodePath)-4]
typeName = fmt.Sprintf("%s.%s.%s", lsputils.GetIncludeName(file), enumNode.(*parser.Enum).Name.Name.Text, targetNode.(*parser.IdentifierName).Text)
}
// search in const value
locations, err := searchConstValueIdentifierReferences(ctx, ss, file, typeName)
if err != nil {
return nil, err
}
locations = append(locations, protocol.Location{
URI: file,
Range: self,
})
return convertLocationToWorkspaceEdit(locations, file, newName), nil
} else if definitionType == "Service" {
svcName := targetNode.(*parser.IdentifierName).Text
if !strings.Contains(svcName, ".") {
svcName = fmt.Sprintf("%s.%s", lsputils.GetIncludeName(file), svcName)
} else {
include, _ := lsputils.ParseIdent(file, pf.AST().Includes, svcName)
path := lsputils.GetIncludePath(pf.AST(), include)
if path != "" { // doesn't match any include path
file = lsputils.IncludeURI(file, path)
}
}
locations, err := searchServiceReferences(ctx, ss, file, svcName)
if err != nil {
return nil, err
}
locations = append(locations, protocol.Location{
URI: file,
Range: self,
})
return convertLocationToWorkspaceEdit(locations, file, newName), nil
}
if _, ok := validReferenceDefinitionType[definitionType]; !ok {
return
}
// typeName is fully qualified, e.g. base.User
typeName := fmt.Sprintf("%s.%s", lsputils.GetIncludeName(file), targetNode.(*parser.IdentifierName).Text)
locations, err := searchIdentifierReferences(ctx, ss, file, typeName, definitionType)
if err != nil {
return nil, err
}
locations = append(locations, protocol.Location{
URI: file,
Range: self,
})
return convertLocationToWorkspaceEdit(locations, file, newName), nil
case "ConstValue":
locations, err := searchConstValueReferences(ctx, ss, file, pf.AST(), nodePath, targetNode)
if err != nil {
return nil, err
}
locations = append(locations, protocol.Location{
URI: file,
Range: self,
})
return convertLocationToWorkspaceEdit(locations, file, newName), nil
default:
err = fmt.Errorf("%s doesn't support rename", targetNode.Type())
return
}
}
func convertLocationToWorkspaceEdit(locations []protocol.Location, fileURI uri.URI, newName string) *protocol.WorkspaceEdit {
res := &protocol.WorkspaceEdit{
Changes: make(map[protocol.DocumentURI][]protocol.TextEdit),
}
for _, loc := range locations {
newText := newName
if loc.URI != fileURI {
newText = lsputils.GetIncludeName(fileURI) + "." + newName
}
res.Changes[loc.URI] = append(res.Changes[loc.URI], protocol.TextEdit{
Range: loc.Range,
NewText: newText,
})
}
return res
}
package codejump
import (
"context"
"errors"
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/lsp/cache"
"github.com/joyme123/thrift-ls/lsp/types"
"github.com/joyme123/thrift-ls/parser"
log "github.com/sirupsen/logrus"
"go.lsp.dev/uri"
)
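// TypeDefinition resolves go-to-type-definition for the symbol at pos; for fields,
// typedefs, functions and consts it jumps to the definition of the declared type.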
func TypeDefinition(ctx context.Context, ss *cache.Snapshot, file uri.URI, pos protocol.Position) (res []protocol.Location, err error) {
res = make([]protocol.Location, 0)
pf, err := ss.Parse(ctx, file)
if err != nil {
return
}
if pf.AST() == nil {
err = errors.New("parse ast failed")
return
}
astPos, err := pf.Mapper().LSPPosToParserPosition(types.Position{Line: pos.Line, Character: pos.Character})
if err != nil {
return
}
nodePath := parser.SearchNodePathByPosition(pf.AST(), astPos)
targetNode := nodePath[len(nodePath)-1]
switch targetNode.Type() {
case "TypeName":
return typeNameDefinition(ctx, ss, file, pf.AST(), targetNode)
case "IdentifierName":
// no parent
if len(nodePath) <= 2 {
return res, nil
}
parent := nodePath[len(nodePath)-3]
var fieldType *parser.FieldType
switch parent.Type() {
case "Field":
field := parent.(*parser.Field)
fieldType = field.FieldType
case "Typedef":
typedef := parent.(*parser.Typedef)
fieldType = typedef.T
case "Function":
fn := parent.(*parser.Function)
fieldType = fn.FunctionType
case "Const":
cst := parent.(*parser.Const)
fieldType = cst.ConstType
}
if fieldType != nil && !fieldType.BadNode && fieldType.TypeName != nil {
return typeNameDefinition(ctx, ss, file, pf.AST(), fieldType.TypeName)
}
case "ConstValue":
return constValueTypeDefinition(ctx, ss, file, pf.AST(), targetNode)
default:
log.Warningln("unsupport type for type definition:", targetNode.Type())
}
return
}
package codejump
import (
"strings"
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/lsp/lsputils"
"github.com/joyme123/thrift-ls/parser"
"go.lsp.dev/uri"
)
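// GetExceptionNode returns the exception definition named name in ast, or nil if
// it does not exist.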
func GetExceptionNode(ast *parser.Document, name string) *parser.Exception {
if ast == nil {
return nil
}
for _, excep := range ast.Exceptions {
if excep.BadNode || excep.Name == nil {
continue
}
if excep.Name.BadNode || excep.Name.Name == nil {
continue
}
if excep.Name.Name.Text == name {
return excep
}
}
return nil
}
func GetStructNode(ast *parser.Document, name string) *parser.Struct {
if ast == nil {
return nil
}
for _, st := range ast.Structs {
if st.BadNode || st.Identifier == nil {
continue
}
if st.Identifier.BadNode || st.Identifier.Name == nil {
continue
}
if st.Identifier.Name.Text == name {
return st
}
}
return nil
}
func GetUnionNode(ast *parser.Document, name string) *parser.Union {
if ast == nil {
return nil
}
for _, st := range ast.Unions {
if st.BadNode || st.Name == nil {
continue
}
if st.Name.BadNode || st.Name.Name == nil {
continue
}
if st.Name.Name.Text == name {
return st
}
}
return nil
}
func GetEnumNode(ast *parser.Document, name string) *parser.Enum {
if ast == nil {
return nil
}
for _, st := range ast.Enums {
if st.BadNode || st.Name == nil {
continue
}
if st.Name.BadNode || st.Name.Name == nil {
continue
}
if st.Name.Name.Text == name {
return st
}
}
return nil
}
func GetEnumNodeByEnumValue(ast *parser.Document, enumValueName string) *parser.Enum {
if ast == nil {
return nil
}
enumName, _, found := strings.Cut(enumValueName, ".")
if !found {
return nil
}
return GetEnumNode(ast, enumName)
}
// GetEnumValueIdentifierNode returns the identifier of an enum value. For enum A { ONE }, ONE is the target node.
func GetEnumValueIdentifierNode(ast *parser.Document, name string) *parser.Identifier {
if ast == nil {
return nil
}
enumName, identifier, found := strings.Cut(name, ".")
if !found {
return nil
}
for _, enum := range ast.Enums {
if enum.BadNode || enum.Name == nil || enum.Name.Name == nil || enum.Name.Name.Text != enumName {
continue
}
for _, enumValue := range enum.Values {
if enumValue.Name == nil || enumValue.Name.BadNode || enumValue.Name.Name == nil || enumValue.Name.Name.Text != identifier {
continue
}
return enumValue.Name
}
}
return nil
}
func GetConstNode(ast *parser.Document, name string) *parser.Const {
if ast == nil {
return nil
}
for _, cst := range ast.Consts {
if cst.BadNode || cst.Name == nil || cst.Name.Name == nil || cst.Name.Name.Text != name {
continue
}
return cst
}
return nil
}
func GetConstIdentifierNode(ast *parser.Document, name string) *parser.Identifier {
if ast == nil {
return nil
}
for _, cst := range ast.Consts {
if cst.BadNode || cst.Name == nil || cst.Name.Name == nil || cst.Name.Name.Text != name {
continue
}
return cst.Name
}
return nil
}
func GetTypedefNode(ast *parser.Document, name string) *parser.Typedef {
if ast == nil {
return nil
}
for _, td := range ast.Typedefs {
if td.BadNode || td.Alias == nil || td.Alias.Name == nil {
continue
}
if td.Alias.Name.Text == name {
return td
}
}
return nil
}
func GetServiceNode(ast *parser.Document, name string) *parser.Service {
if ast == nil {
return nil
}
for _, svc := range ast.Services {
if svc.BadNode || svc.Name == nil || svc.Name.Name == nil || svc.Name.Name.Text != name {
continue
}
return svc
}
return nil
}
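// jump converts an AST node into an LSP location inside file.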
func jump(file uri.URI, node parser.Node) protocol.Location {
rng := lsputils.ASTNodeToRange(node)
return protocol.Location{
Range: rng,
URI: file,
}
}
var basicType = map[string]struct{}{
"map": {},
"set": {},
"list": {},
"string": {},
"i16": {},
"i32": {},
"i64": {},
"i8": {},
"double": {},
"bool": {},
"byte": {},
"binary": {},
"uuid": {},
}
var containerType = map[string]struct{}{
"map": {},
"set": {},
"list": {},
}
func IsBasicType(t string) bool {
_, ok := basicType[t]
return ok
}
func IsContainerType(t string) bool {
_, ok := containerType[t]
return ok
}
package completion
import (
"context"
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/lsp/cache"
)
type Interface interface {
Completion(ctx context.Context, ss *cache.Snapshot, cmp *CompletionRequest) ([]*CompletionItem, protocol.Range, error)
}
// SemanticBasedCompletion generates the completion list based on semantics. It is more precise than token-based completion.
// TODO(jpf)
type SemanticBasedCompletion struct {
}
func BuildCompletionItem(candidate Candidate) *CompletionItem {
return &CompletionItem{
Label: candidate.showText,
Detail: candidate.showText,
InsertText: candidate.insertText,
InsertTextFormat: candidate.format,
Kind: protocol.CompletionItemKindText,
Deprecated: false,
Score: 90,
Documentation: "",
}
}
package completion
import (
"context"
"fmt"
"path/filepath"
"strings"
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/lsp/cache"
"github.com/joyme123/thrift-ls/lsp/lsputils"
"github.com/joyme123/thrift-ls/parser"
"github.com/joyme123/thrift-ls/utils"
log "github.com/sirupsen/logrus"
"go.lsp.dev/uri"
)
var DefaultTokenCompletion Interface = &TokenCompletion{}
// TokenCompletion is token-based completion. It generates the completion list from identifiers in the AST.
type TokenCompletion struct {
}
var keywords = map[string]protocol.InsertTextFormat{
"bool": protocol.InsertTextFormatPlainText,
"byte": protocol.InsertTextFormatPlainText,
"i16": protocol.InsertTextFormatPlainText,
"i32": protocol.InsertTextFormatPlainText,
"i64": protocol.InsertTextFormatPlainText,
"double": protocol.InsertTextFormatPlainText,
"binary": protocol.InsertTextFormatPlainText,
"uuid": protocol.InsertTextFormatPlainText,
"string": protocol.InsertTextFormatPlainText,
"required": protocol.InsertTextFormatPlainText,
"optional": protocol.InsertTextFormatPlainText,
"include": protocol.InsertTextFormatPlainText,
"cpp_include": protocol.InsertTextFormatPlainText,
"list<$1>": protocol.InsertTextFormatSnippet,
"set<$1>": protocol.InsertTextFormatSnippet,
"map<$1, $2>": protocol.InsertTextFormatSnippet,
"struct $1 {\n$2\n}": protocol.InsertTextFormatSnippet,
"const $1 $2 = $3": protocol.InsertTextFormatSnippet,
"service $1 {\n$2\n}": protocol.InsertTextFormatSnippet,
"union $1 {\n$2\n}": protocol.InsertTextFormatSnippet,
"exception $1 {\n$2\n}": protocol.InsertTextFormatSnippet,
"throws ($1)": protocol.InsertTextFormatSnippet,
"typedef $1 $2": protocol.InsertTextFormatSnippet,
}
type Candidate struct {
showText string
insertText string
format protocol.InsertTextFormat
}
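// Completion builds the completion list for the request: include-path candidates
// first, then keyword and token candidates that match the prefix before the cursor.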
func (c *TokenCompletion) Completion(ctx context.Context, ss *cache.Snapshot, cmp *CompletionRequest) ([]*CompletionItem, protocol.Range, error) {
tokens := ss.Tokens()
rng := protocol.Range{
Start: protocol.Position{
Line: cmp.Pos.Line,
Character: cmp.Pos.Character,
},
End: protocol.Position{
Line: cmp.Pos.Line,
Character: cmp.Pos.Character,
},
}
log.Debugln("all tokens:", tokens)
parsedFile, err := ss.Parse(ctx, cmp.Fh.URI())
if err != nil {
return nil, rng, err
}
if parsedFile.AST() == nil {
return nil, rng, fmt.Errorf("parser ast failed")
}
pos, err := parsedFile.Mapper().LSPPosToParserPosition(cmp.Pos)
if err != nil {
return nil, rng, err
}
candidates := make([]Candidate, 0)
log.Debugln("parser pos: ", pos)
includeLiteralPos := pos
includeLiteralPos.Col = includeLiteralPos.Col - 1 // remove quote
nodePath := parser.SearchNodePathByPosition(parsedFile.AST(), includeLiteralPos)
if items, includeRng, err := c.includeCompletion(ss, cmp.Fh.URI(), nodePath); err == nil {
for i := range items {
candidates = append(candidates, items[i])
}
if len(items) > 0 {
rng = includeRng
log.Debugln("include completion candidates: ", candidates)
}
}
if len(candidates) == 0 {
nodePath = parser.SearchNodePathByPosition(parsedFile.AST(), pos)
content, err := cmp.Fh.Content()
if err != nil {
return nil, rng, err
}
var prefix []byte
// get prefix by pos
for i := pos.Offset - 1; i >= 0; i-- {
if utils.Space(content[i]) || content[i] == '.' || content[i] == '\'' || content[i] == '"' {
prefix = content[i+1 : pos.Offset]
rng.Start.Character = rng.Start.Character - uint32(len(prefix))
break
}
}
if len(prefix) == 0 {
// prefix is empty, set prefix to content
prefix = content
rng.Start.Character = rng.Start.Character - uint32(len(prefix))
}
searchCandidate := func(token string, format protocol.InsertTextFormat) {
if len(token) > len(prefix) && strings.HasPrefix(token, string(prefix)) {
candidates = append(candidates, Candidate{
showText: token,
insertText: token,
format: format,
})
}
}
for i := range keywords {
searchCandidate(i, keywords[i])
if len(candidates) >= 10 {
break
}
}
for i := range tokens {
searchCandidate(i, protocol.InsertTextFormatPlainText)
if len(candidates) >= 10 {
break
}
}
log.Debugln("token prefix:", string(prefix), "candidates: ", candidates)
}
res := make([]*CompletionItem, 0, len(candidates))
for i := range candidates {
res = append(res, BuildCompletionItem(candidates[i]))
}
return res, rng, nil
}
func (c *TokenCompletion) includeCompletion(ss *cache.Snapshot, file uri.URI, nodePath []parser.Node) (res []Candidate, rng protocol.Range, err error) {
if len(nodePath) < 3 {
return
}
if nodePath[len(nodePath)-1].Type() != "LiteralValue" || nodePath[len(nodePath)-2].Type() != "Literal" || nodePath[len(nodePath)-3].Type() != "Include" {
return
}
targetNode := nodePath[len(nodePath)-1].(*parser.LiteralValue)
pathPrefix := targetNode.Text
rng = lsputils.ASTNodeToRange(targetNode)
currentDir := filepath.Dir(file.Filename())
log.Debugf("search prefix %s in path %s", pathPrefix, currentDir)
res, err = ListDirAndFiles(currentDir, pathPrefix)
log.Debugln("include completion: ", res, "err", err)
return
}
package completion
import (
"io/fs"
"path/filepath"
"strings"
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/lsp/constants"
log "github.com/sirupsen/logrus"
)
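// ListDirAndFiles lists sub-directories and .thrift files under dir whose names
// match the include-path prefix. Relative prefixes are resolved against dir, e.g.
// (sketch) with dir "/workdir/idl" and prefix "../us" it walks "/workdir" and
// returns entries whose names start with "us".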
func ListDirAndFiles(dir, prefix string) (res []Candidate, err error) {
// handle prefixes like ../../us that climb out of the current directory
prefixClean := prefix
if len(prefix) > 0 {
prefixClean = filepath.Clean(prefix)
}
if prefix == "." {
prefix = prefix + "/"
}
up := strings.Count(prefixClean, "../")
pathItems := strings.Split(dir, "/")
if len(pathItems) < up {
return
}
pathItems = pathItems[0 : len(pathItems)-up]
dir, filePrefix := filepath.Split(strings.TrimPrefix(prefixClean, "../"))
filePrefix = strings.TrimPrefix(filePrefix, "./")
baseDir := strings.Join(pathItems, "/") + "/" + dir
prefix = strings.TrimSuffix(prefix, filePrefix)
log.Debugf("include completion: walk dir %s with prefix %s, filePrefix %s", baseDir, prefix, filePrefix)
filepath.WalkDir(baseDir, func(path string, d fs.DirEntry, err error) error {
if err != nil || baseDir == path {
return nil
}
log.Debugf("include completion: name: %s, prefix: %s", d.Name(), filePrefix)
if strings.HasPrefix(d.Name(), filePrefix) {
if d.IsDir() {
res = append(res, Candidate{
showText: prefix + d.Name() + "/",
insertText: prefix + d.Name() + "/",
format: protocol.InsertTextFormatPlainText,
})
} else if strings.HasSuffix(d.Name(), constants.ThriftExtension) {
res = append(res, Candidate{
showText: prefix + d.Name(),
insertText: prefix + d.Name(),
format: protocol.InsertTextFormatPlainText,
})
}
}
if d.IsDir() {
return filepath.SkipDir
}
return nil
})
return
}
package lsp
import (
"context"
"encoding/json"
log "github.com/sirupsen/logrus"
"go.lsp.dev/jsonrpc2"
)
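// DebugReplier wraps a jsonrpc2.Replier and logs every reply at debug level.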
func DebugReplier(reply jsonrpc2.Replier) jsonrpc2.Replier {
return func(ctx context.Context, result interface{}, err error) error {
res, _ := json.Marshal(result)
log.Debug("jsonrpc reply debug: ", "result", string(res), "err", err)
return reply(ctx, result, err)
}
}
func DebugHandler(handler jsonrpc2.Handler) jsonrpc2.Handler {
return func(ctx context.Context, reply jsonrpc2.Replier, req jsonrpc2.Request) error {
log.Debug("jsonrpc request debug: ", "req", req.Method(), "params", string(req.Params()))
defer func() {
if r := recover(); r != nil {
log.Errorln("Recovered: ", r)
}
}()
return handler(ctx, DebugReplier(reply), req)
}
}
package lsp
import (
"context"
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/lsp/cache"
"github.com/joyme123/thrift-ls/lsp/diagnostic"
"github.com/joyme123/thrift-ls/utils/errors"
log "github.com/sirupsen/logrus"
"go.lsp.dev/uri"
)
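// diagnostic runs all registered diagnostics for the changed file and publishes
// the results to the client.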
func (s *Server) diagnostic(ctx context.Context, ss *cache.Snapshot, changeFile *cache.FileChange) error {
if s.client == nil {
return nil
}
log.Debugln("-----------diagnostic called-----------")
defer log.Debugln("-----------diagnostic finish-----------")
diag := diagnostic.NewDiagnostic()
diagRes, err := diag.Diagnostic(ctx, ss, []uri.URI{changeFile.URI})
if err != nil {
log.Errorf("diagnostic failed: %v", err)
}
log.Debugln("publish diagnostric result: ", len(diagRes))
var errs []error
for file, res := range diagRes {
if res == nil {
res = make([]protocol.Diagnostic, 0)
}
params := &protocol.PublishDiagnosticsParams{
URI: file,
Diagnostics: res,
}
log.Debugln("file:", file, "diagnostics", res)
err = s.client.PublishDiagnostics(ctx, params)
if err != nil {
errs = append(errs, err)
}
}
if len(errs) > 0 {
return errors.NewAggregate(errs)
}
return nil
}
package diagnostic
import (
"context"
"fmt"
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/lsp/cache"
"github.com/joyme123/thrift-ls/lsp/lsputils"
"github.com/joyme123/thrift-ls/parser"
log "github.com/sirupsen/logrus"
"go.lsp.dev/uri"
)
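// CycleCheck reports cyclic include dependencies between thrift files.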
type CycleCheck struct {
}
func (c *CycleCheck) Diagnostic(ctx context.Context, ss *cache.Snapshot, changeFiles []uri.URI) (DiagnosticResult, error) {
includesMap := make(map[uri.URI][]Include)
for _, file := range changeFiles {
getIncludes(ctx, ss, file, &includesMap)
}
cyclePairs := cycleDetect(&includesMap)
return cycleToDiagnosticItems(cyclePairs), nil
}
func (c *CycleCheck) Name() string {
return "CycleCheck"
}
func cycleToDiagnosticItems(pairs []CyclePair) DiagnosticResult {
diagnostics := make(DiagnosticResult)
for i := range pairs {
diagnostics[pairs[i].file] = append(diagnostics[pairs[i].file], cyclePairToDiagnostic(pairs[i]))
}
return diagnostics
}
func cyclePairToDiagnostic(pair CyclePair) protocol.Diagnostic {
res := protocol.Diagnostic{
Range: lsputils.ASTNodeToRange(pair.include.include),
Severity: protocol.DiagnosticSeverityWarning,
Source: "thrift-ls",
Message: fmt.Sprintf("cycle dependency in %s", pair.include.file),
}
return res
}
type Include struct {
file uri.URI
include *parser.Include
}
type CyclePair struct {
file uri.URI
include Include
}
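// cycleDetect returns a CyclePair for every include that takes part in a mutual
// (two-file) include cycle.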
func cycleDetect(includesMap *map[uri.URI][]Include) []CyclePair {
cyclePairs := make([]CyclePair, 0)
for uri, includes := range *includesMap {
for _, incI := range includes {
for _, incJ := range (*includesMap)[incI.file] {
if uri == incJ.file {
cyclePairs = append(cyclePairs, CyclePair{
file: uri,
include: incI,
})
}
}
}
}
return cyclePairs
}
func getIncludes(ctx context.Context, ss *cache.Snapshot, file uri.URI, includesMap *map[uri.URI][]Include) error {
pf, err := ss.Parse(ctx, file)
if err != nil {
log.Errorf("parse %s failed: %v", file, err)
return err
}
if pf.AST() == nil {
log.Errorf("parse ast failed: %v", pf.AggregatedError())
return pf.AggregatedError()
}
includes := pf.AST().Includes
for i := range includes {
if includes[i].Path == nil || includes[i].Path.BadNode {
continue
}
(*includesMap)[file] = append((*includesMap)[file], Include{
file: lsputils.IncludeURI(file, includes[i].Path.Value.Text),
include: includes[i],
})
includeURI := lsputils.IncludeURI(file, includes[i].Path.Value.Text)
if _, ok := (*includesMap)[includeURI]; ok {
continue
}
getIncludes(ctx, ss, includeURI, includesMap)
}
return nil
}
package diagnostic
import (
"context"
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/lsp/cache"
"github.com/joyme123/thrift-ls/utils/errors"
log "github.com/sirupsen/logrus"
"go.lsp.dev/uri"
)
var registry []Interface
func init() {
registry = []Interface{
&CycleCheck{},
&Parse{},
&FieldIDCheck{},
&SemanticAnalysis{},
}
}
type Interface interface {
Diagnostic(ctx context.Context, ss *cache.Snapshot, changeFiles []uri.URI) (DiagnosticResult, error)
Name() string
}
type Diagnostic struct {
}
func NewDiagnostic() Interface {
return &Diagnostic{}
}
func (d *Diagnostic) Diagnostic(ctx context.Context, ss *cache.Snapshot, changeFiles []uri.URI) (DiagnosticResult, error) {
res := make(DiagnosticResult)
var errs []error
for _, impl := range registry {
log.Debugln("diagnostic called: ", impl.Name())
diagRes, err := impl.Diagnostic(ctx, ss, changeFiles)
if err != nil {
errs = append(errs, err)
}
for key, items := range diagRes {
res[key] = append(res[key], items...)
}
}
if len(errs) > 0 {
return res, errors.NewAggregate(errs)
}
return res, nil
}
func (d *Diagnostic) Name() string {
return "Diagnostic"
}
type DiagnosticResult map[uri.URI][]protocol.Diagnostic
package diagnostic
import (
"context"
"errors"
"fmt"
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/lsp/cache"
"github.com/joyme123/thrift-ls/lsp/lsputils"
"github.com/joyme123/thrift-ls/parser"
log "github.com/sirupsen/logrus"
"go.lsp.dev/uri"
)
type FieldIDCheck struct {
}
// FieldIDCheck checks the field ids of structs, unions, exceptions, function params and function throws.
// Each field must have a unique, positive integer identifier.
// ref doc: http://diwakergupta.github.io/thrift-missing-guide/#_defining_structs
func (c *FieldIDCheck) Diagnostic(ctx context.Context, ss *cache.Snapshot, changeFiles []uri.URI) (DiagnosticResult, error) {
res := make(DiagnosticResult)
for _, file := range changeFiles {
items, err := c.diagnostic(ctx, ss, file)
if err != nil {
return nil, err
}
res[file] = items
}
return res, nil
}
func (c *FieldIDCheck) Name() string {
return "FieldIDCheck"
}
func (c *FieldIDCheck) diagnostic(ctx context.Context, ss *cache.Snapshot, file uri.URI) ([]protocol.Diagnostic, error) {
pf, err := ss.Parse(ctx, file)
if err != nil {
return nil, err
}
if pf.AST() == nil {
return nil, errors.New("parse ast failed")
}
for _, err := range pf.Errors() {
log.Debugln("parse err", err)
}
var ret []protocol.Diagnostic
processStructLike := func(fields []*parser.Field) {
fieldIDSet := make(map[int][]*parser.Field)
for i := range fields {
field := fields[i]
if field.Index == nil || field.Index.BadNode {
continue
}
fieldIDSet[field.Index.Value] = append(fieldIDSet[field.Index.Value], field)
}
for fieldID, set := range fieldIDSet {
if fieldID < 1 || fieldID > 32767 {
for _, field := range set {
// field ID exceeded
ret = append(ret, protocol.Diagnostic{
Range: lsputils.ASTNodeToRange(field.Index),
Severity: protocol.DiagnosticSeverityError,
Source: "thrift-ls",
Message: fmt.Sprintf("field id should be a positive integer in [1, 32767]"),
})
}
}
if len(set) == 1 {
continue
}
for _, field := range set {
// field id conflict
ret = append(ret, protocol.Diagnostic{
Range: lsputils.ASTNodeToRange(field.Index),
Severity: protocol.DiagnosticSeverityError,
Source: "thrift-ls",
Message: fmt.Sprintf("field id conflict"),
})
}
}
}
for _, st := range pf.AST().Structs {
processStructLike(st.Fields)
}
for _, union := range pf.AST().Unions {
processStructLike(union.Fields)
}
for _, excep := range pf.AST().Exceptions {
processStructLike(excep.Fields)
}
for _, svc := range pf.AST().Services {
for _, fn := range svc.Functions {
processStructLike(fn.Arguments)
if fn.Throws != nil {
processStructLike(fn.Throws.Fields)
}
}
}
return ret, nil
}
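// Illustrative sketch (not part of the original source): given a hypothetical
// user.thrift like the one below, this check reports one diagnostic for the
// out-of-range id 0 and one per field sharing the duplicated id 1.
//
//	struct User {
//	    0: string name   // field id should be a positive integer in [1, 32767]
//	    1: string email  // field id conflict
//	    1: i32 age       // field id conflict
//	}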
package diagnostic
import (
"context"
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/lsp/cache"
"github.com/joyme123/thrift-ls/parser"
"github.com/joyme123/thrift-ls/utils/errors"
log "github.com/sirupsen/logrus"
"go.lsp.dev/uri"
)
type Parse struct {
}
func (p *Parse) Diagnostic(ctx context.Context, ss *cache.Snapshot, changeFiles []uri.URI) (DiagnosticResult, error) {
var errs []error
res := make(DiagnosticResult)
for _, uri := range changeFiles {
parseRes, err := ss.Parse(ctx, uri)
if err != nil {
errs = append(errs, err)
continue
}
// TODO(jpf): parse recursively (resolve included files as well)
for _, err := range parseRes.Errors() {
parseErr, ok := err.(parser.ParserError)
if !ok {
continue
}
log.Debugf("diagnostic parse err: %v", parseErr)
diag := parseErrToDiagnostic(parseErr)
res[uri] = append(res[uri], diag)
}
}
if len(errs) > 0 {
return res, errors.NewAggregate(errs)
}
return res, nil
}
func (p *Parse) Name() string {
return "Parse"
}
func parseErrToDiagnostic(err parser.ParserError) protocol.Diagnostic {
line, col, _ := err.Pos()
diag := protocol.Diagnostic{
Range: protocol.Range{
Start: protocol.Position{
Line: uint32(line - 1),
Character: uint32(col - 1),
},
End: protocol.Position{
Line: uint32(line - 1),
Character: uint32(col - 1),
},
},
Severity: protocol.DiagnosticSeverityError,
Source: "thrift-ls",
Message: err.InnerError().Error(),
}
return diag
}
package diagnostic
import (
"context"
"errors"
"fmt"
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/lsp/cache"
"github.com/joyme123/thrift-ls/lsp/codejump"
"github.com/joyme123/thrift-ls/lsp/lsputils"
"github.com/joyme123/thrift-ls/parser"
log "github.com/sirupsen/logrus"
"go.lsp.dev/uri"
)
type SemanticAnalysis struct {
}
func (s *SemanticAnalysis) Diagnostic(ctx context.Context, ss *cache.Snapshot, changeFiles []uri.URI) (DiagnosticResult, error) {
res := make(DiagnosticResult)
for _, file := range changeFiles {
items, err := s.diagnostic(ctx, ss, file)
if err != nil {
return nil, err
}
res[file] = items
}
return res, nil
}
func (s *SemanticAnalysis) Name() string {
return "SemanticAnalysis"
}
func (s *SemanticAnalysis) diagnostic(ctx context.Context, ss *cache.Snapshot, changeFile uri.URI) ([]protocol.Diagnostic, error) {
pf, err := ss.Parse(ctx, changeFile)
if err != nil {
return nil, err
}
if pf.AST() == nil {
return nil, errors.New("parse ast failed")
}
for _, err := range pf.Errors() {
log.Debugln("parse err", err)
}
res := s.checkDefineConflict(ctx, pf)
items := s.checkDefinitionExist(ctx, ss, changeFile, pf)
res = append(res, items...)
return res, nil
}
func (s *SemanticAnalysis) checkDefineConflict(ctx context.Context, pf *cache.ParsedFile) []protocol.Diagnostic {
var ret []protocol.Diagnostic
processStructLike := func(fields []*parser.Field) {
fieldMap := make(map[string]struct{})
for i := range fields {
field := fields[i]
if field.IsBadNode() || field.ChildrenBadNode() {
continue
}
if _, exist := fieldMap[field.Identifier.Name.Text]; exist {
// field name conflict
ret = append(ret, protocol.Diagnostic{
Range: lsputils.ASTNodeToRange(field.Identifier.Name),
Severity: protocol.DiagnosticSeverityError,
Source: "thrift-ls",
Message: "field name conflict with other field",
})
}
fieldMap[field.Identifier.Name.Text] = struct{}{}
}
}
definitionNameMap := make(map[string]string)
structMap := make(map[string]struct{})
for _, st := range pf.AST().Structs {
if st.IsBadNode() || st.ChildrenBadNode() {
continue
}
if _, exist := structMap[st.Identifier.Name.Text]; exist {
// struct conflict
ret = append(ret, protocol.Diagnostic{
Range: lsputils.ASTNodeToRange(st.Identifier.Name),
Severity: protocol.DiagnosticSeverityError,
Source: "thrift-ls",
Message: fmt.Sprintf("struct name conflict with other struct"),
})
}
if t, exist := definitionNameMap[st.Identifier.Name.Text]; exist && t != st.Type() {
// struct name conflict with another definition type
ret = append(ret, protocol.Diagnostic{
Range: lsputils.ASTNodeToRange(st.Identifier.Name),
Severity: protocol.DiagnosticSeverityHint,
Source: "thrift-ls",
Message: "struct name conflict with other type",
})
}
structMap[st.Identifier.Name.Text] = struct{}{}
definitionNameMap[st.Identifier.Name.Text] = st.Type()
processStructLike(st.Fields)
}
unionMap := make(map[string]struct{})
for _, union := range pf.AST().Unions {
if union.IsBadNode() || union.ChildrenBadNode() {
continue
}
if _, exist := unionMap[union.Name.Name.Text]; exist {
// union conflict
ret = append(ret, protocol.Diagnostic{
Range: lsputils.ASTNodeToRange(union.Name.Name),
Severity: protocol.DiagnosticSeverityError,
Source: "thrift-ls",
Message: fmt.Sprintf("union name conflict with other union"),
})
}
if t, exist := definitionNameMap[union.Name.Name.Text]; exist && t != union.Type() {
// union conflict with others
ret = append(ret, protocol.Diagnostic{
Range: lsputils.ASTNodeToRange(union.Name.Name),
Severity: protocol.DiagnosticSeverityHint,
Source: "thrift-ls",
Message: fmt.Sprintf("union name conflict with other type"),
})
}
unionMap[union.Name.Name.Text] = struct{}{}
definitionNameMap[union.Name.Name.Text] = union.Type()
processStructLike(union.Fields)
}
excepMap := make(map[string]struct{})
for _, excep := range pf.AST().Exceptions {
if excep.IsBadNode() || excep.ChildrenBadNode() {
continue
}
if _, exist := excepMap[excep.Name.Name.Text]; exist {
// exception conflict
ret = append(ret, protocol.Diagnostic{
Range: lsputils.ASTNodeToRange(excep.Name.Name),
Severity: protocol.DiagnosticSeverityError,
Source: "thrift-ls",
Message: fmt.Sprintf("exception name conflict with other exception"),
})
}
if t, exist := definitionNameMap[excep.Name.Name.Text]; exist && t != excep.Type() {
// exception name conflict with another definition type
ret = append(ret, protocol.Diagnostic{
Range: lsputils.ASTNodeToRange(excep.Name.Name),
Severity: protocol.DiagnosticSeverityHint,
Source: "thrift-ls",
Message: "exception name conflict with other type",
})
}
excepMap[excep.Name.Name.Text] = struct{}{}
definitionNameMap[excep.Name.Name.Text] = excep.Type()
processStructLike(excep.Fields)
}
svcMap := make(map[string]struct{})
for _, svc := range pf.AST().Services {
if svc.IsBadNode() || svc.ChildrenBadNode() {
continue
}
if _, exist := svcMap[svc.Name.Name.Text]; exist {
// service conflict
ret = append(ret, protocol.Diagnostic{
Range: lsputils.ASTNodeToRange(svc.Name.Name),
Severity: protocol.DiagnosticSeverityError,
Source: "thrift-ls",
Message: fmt.Sprintf("service name conflict with other service"),
})
}
if t, exist := definitionNameMap[svc.Name.Name.Text]; exist && t != svc.Type() {
// service conflict with others
ret = append(ret, protocol.Diagnostic{
Range: lsputils.ASTNodeToRange(svc.Name.Name),
Severity: protocol.DiagnosticSeverityHint,
Source: "thrift-ls",
Message: fmt.Sprintf("service name conflict with other type"),
})
}
svcMap[svc.Name.Name.Text] = struct{}{}
definitionNameMap[svc.Name.Name.Text] = svc.Type()
fnMap := make(map[string]struct{})
for _, fn := range svc.Functions {
if fn.IsBadNode() || fn.ChildrenBadNode() {
continue
}
if _, exist := fnMap[fn.Name.Name.Text]; exist {
// function conflict
ret = append(ret, protocol.Diagnostic{
Range: lsputils.ASTNodeToRange(fn.Name.Name),
Severity: protocol.DiagnosticSeverityWarning,
Source: "thrift-ls",
Message: fmt.Sprintf("function name conflict with other function"),
})
}
fnMap[fn.Name.Name.Text] = struct{}{}
processStructLike(fn.Arguments)
if fn.Throws != nil {
processStructLike(fn.Throws.Fields)
}
}
}
return ret
}
// checkDefinitionExist checks that referenced types and default values exist,
// using the same resolution logic as goto-definition.
// It covers struct/union/exception field types, const values and function signatures.
func (s *SemanticAnalysis) checkDefinitionExist(ctx context.Context, ss *cache.Snapshot, file uri.URI, pf *cache.ParsedFile) []protocol.Diagnostic {
ret := make([]protocol.Diagnostic, 0)
// struct/union/exception/function arguments/throw fields field type
processStructLike := func(fields []*parser.Field) {
for i := range fields {
field := fields[i]
if field.IsBadNode() || field.ChildrenBadNode() {
continue
}
items := s.checkTypeExist(ctx, ss, file, pf, field.FieldType)
ret = append(ret, items...)
// default value check
if field.ConstValue != nil {
items := s.checkConstValueExist(ctx, ss, file, pf, field.ConstValue)
ret = append(ret, items...)
dig := s.checkConstValueMatchType(ctx, field)
if dig != nil {
ret = append(ret, *dig)
}
}
}
}
for _, st := range pf.AST().Structs {
processStructLike(st.Fields)
}
for _, union := range pf.AST().Unions {
processStructLike(union.Fields)
}
for _, excep := range pf.AST().Exceptions {
processStructLike(excep.Fields)
}
for _, cst := range pf.AST().Consts {
items := s.checkConstValueExist(ctx, ss, file, pf, cst.Value)
ret = append(ret, items...)
}
for _, svc := range pf.AST().Services {
for _, fn := range svc.Functions {
if fn.FunctionType != nil {
items := s.checkTypeExist(ctx, ss, file, pf, fn.FunctionType)
ret = append(ret, items...)
}
processStructLike(fn.Arguments)
if fn.Throws != nil {
processStructLike(fn.Throws.Fields)
}
}
}
return ret
}
func (s *SemanticAnalysis) checkConstValueExist(ctx context.Context, ss *cache.Snapshot,
file uri.URI, pf *cache.ParsedFile, cst *parser.ConstValue) (res []protocol.Diagnostic) {
if cst.TypeName != "identifier" {
return
}
if cst.Value == "true" || cst.Value == "false" {
return
}
_, id, err := codejump.ConstValueTypeDefinitionIdentifier(ctx, ss, file, pf.AST(), cst)
if err != nil || id == nil {
res = append(res, protocol.Diagnostic{
Range: lsputils.ASTNodeToRange(cst),
Severity: protocol.DiagnosticSeverityError,
Source: "thrift-ls",
Message: fmt.Sprintf("default value doesn't exist"),
})
}
return
}
func (s *SemanticAnalysis) checkConstValueMatchType(ctx context.Context, field *parser.Field) (res *protocol.Diagnostic) {
if field.BadNode || field.ChildrenBadNode() {
return nil
}
expectTypeName := field.FieldType.TypeName
if field.ConstValue != nil {
valueType := field.ConstValue.TypeName
// TypeName can be: list, map, pair, string, identifier, i64, double
switch valueType {
case "list", "map", "string", "double":
if expectTypeName.Name != valueType {
return &protocol.Diagnostic{
Range: lsputils.ASTNodeToRange(field.ConstValue),
Severity: protocol.DiagnosticSeverityError,
Source: "thrift-ls",
Message: fmt.Sprintf("expect %s but got %s", expectTypeName.Name, valueType),
}
}
case "identifier":
if valueType == "identifier" &&
(field.ConstValue.Value == "true" || field.ConstValue.Value == "false") {
valueType = "bool"
}
if expectTypeName.Name == "bool" {
if field.ConstValue.Value != "true" && field.ConstValue.Value != "false" {
return &protocol.Diagnostic{
Range: lsputils.ASTNodeToRange(field.ConstValue),
Severity: protocol.DiagnosticSeverityError,
Source: "thrift-ls",
Message: fmt.Sprintf("expect %s but got %s", expectTypeName.Name, valueType),
}
}
} else if codejump.IsBasicType(valueType) {
return &protocol.Diagnostic{
Range: lsputils.ASTNodeToRange(field.ConstValue),
Severity: protocol.DiagnosticSeverityError,
Source: "thrift-ls",
Message: fmt.Sprintf("expect %s but got %s", expectTypeName.Name, valueType),
}
}
case "i64":
if expectTypeName.Name != "i8" &&
expectTypeName.Name != "i16" &&
expectTypeName.Name != "i32" &&
expectTypeName.Name != "i64" {
return &protocol.Diagnostic{
Range: lsputils.ASTNodeToRange(field.ConstValue),
Severity: protocol.DiagnosticSeverityError,
Source: "thrift-ls",
Message: fmt.Sprintf("expect %s but got %s", expectTypeName.Name, valueType),
}
}
}
}
return nil
}
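// Illustrative sketch (not part of the original source): hypothetical default
// values that checkConstValueMatchType would flag or accept.
//
//	struct Config {
//	    1: i32 retries = "three"  // expect i32 but got string
//	    2: bool verbose = 1       // expect bool but got i64
//	    3: i16 port = 8080        // ok: integer literals are accepted for i8/i16/i32/i64
//	    4: double ratio = 0.5     // ok: double matches double
//	}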
func (s *SemanticAnalysis) checkTypeExist(ctx context.Context, ss *cache.Snapshot,
file uri.URI, pf *cache.ParsedFile, ft *parser.FieldType) (res []protocol.Diagnostic) {
if codejump.IsContainerType(ft.TypeName.Name) {
return s.checkContainerTypeExist(ctx, ss, file, pf, ft)
} else if codejump.IsBasicType(ft.TypeName.Name) {
return nil
} else {
_, id, _, err := codejump.TypeNameDefinitionIdentifier(ctx, ss, file, pf.AST(), ft.TypeName)
if err != nil || id == nil {
res = append(res, protocol.Diagnostic{
Range: lsputils.ASTNodeToRange(ft),
Severity: protocol.DiagnosticSeverityError,
Source: "thrift-ls",
Message: fmt.Sprintf("field type doesn't exist"),
})
}
}
return res
}
func (s *SemanticAnalysis) checkContainerTypeExist(ctx context.Context,
ss *cache.Snapshot, file uri.URI, pf *cache.ParsedFile, ft *parser.FieldType) (res []protocol.Diagnostic) {
if ft.KeyType != nil {
items := s.checkTypeExist(ctx, ss, file, pf, ft.KeyType)
res = append(res, items...)
}
if ft.ValueType != nil {
items := s.checkTypeExist(ctx, ss, file, pf, ft.ValueType)
res = append(res, items...)
}
return res
}
// TODO(jpf): the field type and its default value type should be consistent
package lsp
import (
"context"
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/format"
"github.com/joyme123/thrift-ls/lsp/mapper"
)
func (s *Server) formatting(ctx context.Context, params *protocol.DocumentFormattingParams) (result []protocol.TextEdit, err error) {
// TODO: support format options
document := params.TextDocument
fileURI := document.URI
view, err := s.session.ViewOf(fileURI)
if err != nil {
return nil, err
}
ss, release := view.Snapshot()
defer release()
fh, err := ss.ReadFile(ctx, fileURI)
if err != nil {
return nil, err
}
bytes, err := fh.Content()
if err != nil {
return nil, err
}
pf, err := ss.Parse(ctx, fileURI)
if err != nil {
return nil, err
}
if len(pf.Errors()) > 0 || pf.AST() == nil {
return nil, pf.AggregatedError()
}
formatted, err := format.FormatDocument(pf.AST())
if err != nil {
return nil, err
}
mp := mapper.NewMapper(fileURI, bytes)
endPos := mp.GetLSPEndPosition()
textEdit := protocol.TextEdit{
Range: protocol.Range{
Start: protocol.Position{
Line: 0,
Character: 0,
},
End: protocol.Position{
Line: endPos.Line,
Character: endPos.Character,
},
},
NewText: formatted,
}
result = append(result, textEdit)
return
}
package lsp
import (
"context"
"strings"
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/lsp/codejump"
)
func (s *Server) hover(ctx context.Context, params *protocol.HoverParams) (*protocol.Hover, error) {
file := params.TextDocument.URI
view, err := s.session.ViewOf(file)
if err != nil {
return nil, err
}
ss, release := view.Snapshot()
defer release()
content, err := codejump.Hover(ctx, ss, params.TextDocument.URI, params.Position)
if err != nil {
return nil, err
}
if content == "" {
return nil, nil
}
markdown_prefix := "```thrift\n"
if strings.HasPrefix(content, "\n") {
markdown_prefix = "```thrift"
}
markdown_suffix := "\n```"
if strings.HasSuffix(content, "\n") {
markdown_suffix = "```"
}
return &protocol.Hover{
Contents: protocol.MarkupContent{
Kind: protocol.Markdown,
Value: markdown_prefix + content + markdown_suffix,
},
}, nil
}
package lsp
import (
"context"
"fmt"
"path"
"strings"
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/lsp/cache"
"github.com/joyme123/thrift-ls/lsp/completion"
"github.com/joyme123/thrift-ls/lsp/types"
log "github.com/sirupsen/logrus"
"go.lsp.dev/uri"
)
func (s *Server) didOpen(ctx context.Context, params *protocol.DidOpenTextDocumentParams) error {
document := params.TextDocument
if document.LanguageID != LanguageIDThrift {
return nil
}
fileURI := document.URI
change := &cache.FileChange{
URI: fileURI,
Version: int(document.Version),
Content: []byte(document.Text),
From: cache.FileChangeTypeDidOpen,
}
s.session.Initialize(func() {
file := change.URI
dirPos := strings.LastIndexByte(string(file), '/')
if dirPos == -1 {
return
}
dir := file[0:dirPos]
s.walkFoldersThriftFile(dir)
})
return s.openFile(ctx, change)
}
func (s *Server) openFile(ctx context.Context, change *cache.FileChange) error {
if change.From != cache.FileChangeTypeInitialize {
if err := s.session.UpdateOverlayFS(ctx, []*cache.FileChange{change}); err != nil {
return err
}
}
if _, err := s.session.ViewOf(change.URI); err != nil {
// create view for this folder
filename := change.URI.Filename()
dir := uri.New(path.Dir(filename))
s.session.CreateView(dir)
}
view, _ := s.session.ViewOf(change.URI)
view.FileChange(ctx, []*cache.FileChange{change}, func() {
ss, release := view.Snapshot()
defer release()
err := s.diagnostic(ctx, ss, change)
if err != nil {
log.Errorf("diagnostic error: %v", err)
}
})
return nil
}
func (s *Server) didChange(ctx context.Context, params *protocol.DidChangeTextDocumentParams) error {
changes := cache.FileChangeFromLSPDidChange(params)
if err := s.session.UpdateOverlayFS(ctx, changes); err != nil {
return err
}
document := params.TextDocument
fileURI := document.URI
view, err := s.session.ViewOf(fileURI)
if err != nil {
return err
}
view.FileChange(ctx, changes, func() {
ss, release := view.Snapshot()
defer release()
for i := range changes {
err := s.diagnostic(ctx, ss, changes[i])
if err != nil {
log.Error("diagnostic error", err)
}
}
})
return nil
}
func (s *Server) completion(ctx context.Context, params *protocol.CompletionParams) (*protocol.CompletionList, error) {
snapshot, release, fh, err := s.getFileContext(ctx, params.TextDocument.URI)
if err != nil {
return nil, err
}
defer release()
items, rng, err := completion.DefaultTokenCompletion.Completion(ctx, snapshot, &completion.CompletionRequest{
TriggerKind: 0,
Pos: types.Position{
Line: params.Position.Line,
Character: params.Position.Character,
},
Fh: fh,
})
if err != nil {
return nil, err
}
return toLspCompletionList(items, rng), nil
}
func toLspCompletionList(items []*completion.CompletionItem, rng protocol.Range) *protocol.CompletionList {
list := &protocol.CompletionList{
IsIncomplete: true,
}
for i := range items {
item := protocol.CompletionItem{
Label: items[i].Label,
Detail: items[i].Detail,
Kind: items[i].Kind,
TextEdit: &protocol.TextEdit{
NewText: items[i].InsertText,
Range: rng,
},
FilterText: strings.TrimLeft(items[i].Label, "&*"),
InsertTextFormat: items[i].InsertTextFormat,
SortText: fmt.Sprintf("%05d", i),
Preselect: i == 0,
Deprecated: items[i].Deprecated,
Documentation: items[i].Documentation,
}
list.Items = append(list.Items, item)
}
return list
}
func (s *Server) getFileContext(ctx context.Context, uri uri.URI) (ss *cache.Snapshot, release func(), fh cache.FileHandle, err error) {
var view *cache.View
view, err = s.session.ViewOf(uri)
if err != nil {
return
}
ss, release = view.Snapshot()
fh, err = ss.ReadFile(ctx, uri)
if err != nil {
release()
return
}
return
}
package lsp
import (
"context"
"io/fs"
"path/filepath"
"strings"
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/lsp/cache"
log "github.com/sirupsen/logrus"
"go.lsp.dev/uri"
)
func (s *Server) initialize(ctx context.Context, params *protocol.InitializeParams) (result *protocol.InitializeResult, err error) {
rootURI := params.RootURI
if rootURI == "" {
rootURI = uri.URI(params.RootPath)
}
folders := make([]uri.URI, 0)
if rootURI != "" {
folders = append(folders, rootURI)
}
for _, ws := range params.WorkspaceFolders {
folders = append(folders, uri.URI(ws.URI))
}
log.Debugln("initialized folders: ", folders)
if len(folders) > 0 {
s.session.Initialize(func() {
for i := range folders {
s.walkFoldersThriftFile(folders[i])
}
})
}
return initializeResult(), nil
}
func (s *Server) walkFoldersThriftFile(folder uri.URI) {
log.Debugln("walk dir 2: ", folder.Filename())
// WalkDir walks files in lexical order
filepath.WalkDir(folder.Filename(), func(path string, d fs.DirEntry, err error) error {
log.Debugln("walk:", path)
if err != nil {
return nil
}
if d.IsDir() {
return nil
}
if !strings.HasSuffix(path, ".thrift") {
return nil
}
fileURI := uri.File(path)
log.Debugln("file path:", fileURI)
err = s.openFile(context.TODO(), &cache.FileChange{
URI: fileURI,
Version: 0,
Content: []byte{},
From: cache.FileChangeTypeInitialize,
})
if err != nil {
log.Error("openFile err:", err)
err = nil
}
// always return nil to continue walking the remaining files
return nil
})
}
func initializeResult() *protocol.InitializeResult {
res := &protocol.InitializeResult{
Capabilities: protocol.ServerCapabilities{
TextDocumentSync: &protocol.TextDocumentSyncOptions{
OpenClose: true,
// full sync is easy to implement. consider using incremental sync for better performance
Change: protocol.TextDocumentSyncKindFull,
WillSave: true,
WillSaveWaitUntil: true,
Save: &protocol.SaveOptions{
IncludeText: true,
},
},
CompletionProvider: &protocol.CompletionOptions{
ResolveProvider: true,
/**
* The additional characters, beyond the defaults provided by the client (typically
* [a-zA-Z]), that should automatically trigger a completion request. For example
* `.` in JavaScript represents the beginning of an object property or method and is
* thus a good candidate for triggering a completion request.
*
* Most tools trigger a completion request automatically without explicitly
* requesting it using a keyboard shortcut (e.g. Ctrl+Space). Typically they
* do so when the user starts to type an identifier. For example if the user
* types `c` in a JavaScript file code complete will automatically pop up
* present `console` besides others as a completion item. Characters that
* make up identifiers don't need to be listed here.
*/
TriggerCharacters: []string{"."},
},
HoverProvider: &protocol.HoverOptions{
WorkDoneProgressOptions: protocol.WorkDoneProgressOptions{
WorkDoneProgress: true,
},
},
SignatureHelpProvider: &protocol.SignatureHelpOptions{
TriggerCharacters: []string{},
RetriggerCharacters: []string{},
},
DeclarationProvider: &protocol.DeclarationRegistrationOptions{
DeclarationOptions: protocol.DeclarationOptions{
WorkDoneProgressOptions: protocol.WorkDoneProgressOptions{
WorkDoneProgress: true,
},
},
TextDocumentRegistrationOptions: protocol.TextDocumentRegistrationOptions{
DocumentSelector: []*protocol.DocumentFilter{
{
Language: "thrift",
},
},
},
StaticRegistrationOptions: protocol.StaticRegistrationOptions{
ID: "thriftls",
},
},
DefinitionProvider: &protocol.DefinitionOptions{
WorkDoneProgressOptions: protocol.WorkDoneProgressOptions{
WorkDoneProgress: true,
},
},
TypeDefinitionProvider: &protocol.TypeDefinitionRegistrationOptions{
TextDocumentRegistrationOptions: protocol.TextDocumentRegistrationOptions{
DocumentSelector: []*protocol.DocumentFilter{
{
Language: "thrift",
},
},
},
TypeDefinitionOptions: protocol.TypeDefinitionOptions{
WorkDoneProgressOptions: protocol.WorkDoneProgressOptions{
WorkDoneProgress: true,
},
},
StaticRegistrationOptions: protocol.StaticRegistrationOptions{
ID: "thriftls",
},
},
ReferencesProvider: &protocol.ReferenceOptions{
WorkDoneProgressOptions: protocol.WorkDoneProgressOptions{
WorkDoneProgress: true,
},
},
DocumentHighlightProvider: false,
DocumentSymbolProvider: &protocol.DocumentSymbolOptions{
WorkDoneProgressOptions: protocol.WorkDoneProgressOptions{
WorkDoneProgress: true,
},
Label: "thriftls",
},
CodeActionProvider: &protocol.CodeActionOptions{
// TODO(jpf): should support code actions
CodeActionKinds: []protocol.CodeActionKind{},
ResolveProvider: false,
},
CodeLensProvider: &protocol.CodeLensOptions{
ResolveProvider: false,
},
DocumentLinkProvider: &protocol.DocumentLinkOptions{
ResolveProvider: false,
},
ColorProvider: false,
WorkspaceSymbolProvider: &protocol.WorkspaceSymbolOptions{
WorkDoneProgressOptions: protocol.WorkDoneProgressOptions{
WorkDoneProgress: true,
},
},
DocumentFormattingProvider: &protocol.DocumentFormattingOptions{
WorkDoneProgressOptions: protocol.WorkDoneProgressOptions{
WorkDoneProgress: true,
},
},
DocumentRangeFormattingProvider: &protocol.DocumentRangeFormattingOptions{
WorkDoneProgressOptions: protocol.WorkDoneProgressOptions{
WorkDoneProgress: true,
},
},
DocumentOnTypeFormattingProvider: &protocol.DocumentOnTypeFormattingOptions{
FirstTriggerCharacter: "}",
MoreTriggerCharacter: []string{},
},
RenameProvider: &protocol.RenameOptions{
PrepareProvider: false,
},
ExecuteCommandProvider: &protocol.ExecuteCommandOptions{
Commands: []string{},
},
CallHierarchyProvider: false,
LinkedEditingRangeProvider: false,
SemanticTokensProvider: &protocol.SemanticTokensRegistrationOptions{
TextDocumentRegistrationOptions: protocol.TextDocumentRegistrationOptions{
DocumentSelector: []*protocol.DocumentFilter{
{
Language: "thrift",
},
},
},
SemanticTokensOptions: protocol.SemanticTokensOptions{
WorkDoneProgressOptions: protocol.WorkDoneProgressOptions{
WorkDoneProgress: true,
},
Legend: protocol.SemanticTokensLegend{
TokenTypes: []protocol.SemanticTokenTypes{},
TokenModifiers: []protocol.SemanticTokenModifiers{},
},
},
StaticRegistrationOptions: protocol.StaticRegistrationOptions{
ID: "thriftls",
},
},
Workspace: &protocol.ServerCapabilitiesWorkspace{
WorkspaceFolders: &protocol.ServerCapabilitiesWorkspaceFolders{
Supported: true,
ChangeNotifications: true,
},
FileOperations: &protocol.ServerCapabilitiesWorkspaceFileOperations{
DidCreate: &protocol.FileOperationRegistrationOptions{
Filters: []protocol.FileOperationFilter{},
},
WillCreate: &protocol.FileOperationRegistrationOptions{
Filters: []protocol.FileOperationFilter{},
},
DidRename: &protocol.FileOperationRegistrationOptions{
Filters: []protocol.FileOperationFilter{},
},
WillRename: &protocol.FileOperationRegistrationOptions{
Filters: []protocol.FileOperationFilter{},
},
DidDelete: &protocol.FileOperationRegistrationOptions{
Filters: []protocol.FileOperationFilter{},
},
WillDelete: &protocol.FileOperationRegistrationOptions{
Filters: []protocol.FileOperationFilter{},
},
},
},
MonikerProvider: nil,
Experimental: nil,
},
ServerInfo: &protocol.ServerInfo{
Name: ServerName,
Version: ServerVersion,
},
}
return res
}
package lsputils
import (
"path/filepath"
"sort"
"strings"
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/parser"
"go.lsp.dev/uri"
)
func ASTNodeToRange(node parser.Node) protocol.Range {
return protocol.Range{
Start: protocol.Position{
Line: uint32(node.Pos().Line - 1),
Character: uint32(node.Pos().Col - 1),
},
End: protocol.Position{
Line: uint32(node.End().Line - 1),
Character: uint32(node.End().Col - 1),
},
}
}
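// Illustrative sketch (not part of the original source): a node spanning the
// 1-based parser positions (line 3, col 5) to (line 3, col 9) maps to the
// 0-based LSP range {Start: {Line: 2, Character: 4}, End: {Line: 2, Character: 8}}.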
// GetIncludeName returns the include name for a file uri.
// For example: if the file uri is file:///base.thrift, then `base` is the include name.
func GetIncludeName(file uri.URI) string {
fileName := file.Filename()
index := strings.LastIndexByte(fileName, filepath.Separator)
if index == -1 {
return fileName
}
fileName = string(fileName[index+1:])
index = strings.LastIndexByte(fileName, '.')
if index == -1 {
return fileName
}
return string(fileName[0:index])
}
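// Illustrative usage (sketch, not part of the original source):
//
//	GetIncludeName(uri.File("/tmp/base.thrift")) // "base"
//	GetIncludeName(uri.File("/tmp/user"))        // "user" (no extension to strip)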
// GetIncludePath returns the path declared in the document's include statements for the given include name.
// For example: for the identifier base.User, `base` is the includeName and the result might be ../../base.thrift.
// If no include matches, it returns an empty string.
func GetIncludePath(ast *parser.Document, includeName string) string {
for _, include := range ast.Includes {
if include.BadNode || include.Path == nil || include.Path.BadNode || include.Path.Value == nil {
continue
}
items := strings.Split(include.Path.Value.Text, "/")
path := items[len(items)-1]
if !strings.HasSuffix(path, ".thrift") {
continue
}
name := strings.TrimSuffix(path, ".thrift")
if name == includeName {
return include.Path.Value.Text
}
}
return ""
}
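// Illustrative usage (sketch, not part of the original source): assuming the
// document contains `include "../../base.thrift"`,
//
//	GetIncludePath(ast, "base") // "../../base.thrift"
//	GetIncludePath(ast, "user") // "" (no matching include)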
// IncludeURI resolves an include path against the current file uri.
// cur is the current file uri, for example file:///tmp/user.thrift.
// includePath is the path used in the include statement, for example: base.thrift.
func IncludeURI(cur uri.URI, includePath string) uri.URI {
filePath := cur.Filename()
items := strings.Split(filePath, string(filepath.Separator))
basePath := strings.TrimSuffix(filePath, items[len(items)-1])
path := filepath.Join(basePath, includePath)
return uri.File(path)
}
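// Illustrative usage (sketch, not part of the original source):
//
//	IncludeURI(uri.File("/tmp/user.thrift"), "base.thrift")        // file:///tmp/base.thrift
//	IncludeURI(uri.File("/tmp/user.thrift"), "../shared/a.thrift") // file:///shared/a.thrift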
// ParseIdent parses an identifier. identifier format:
// 1. identifier
// 2. include.identifier
//
// it returns include, ident
func ParseIdent(cur uri.URI, includes []*parser.Include, identifier string) (include, ident string) {
includeNames := IncludeNames(cur, includes)
// parse include from includeNames
sort.SliceStable(includeNames, func(i, j int) bool {
// sort by string length so the longest include name matches first
// examples:
// user.extra
// user
return len(includeNames[i]) > len(includeNames[j])
})
for _, incName := range includeNames {
prefix := incName + "."
if strings.HasPrefix(identifier, prefix) {
return incName, strings.TrimPrefix(identifier, prefix)
}
}
return "", identifier
}
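// Illustrative usage (sketch, not part of the original source): assuming
// user.thrift includes base.thrift,
//
//	ParseIdent(cur, includes, "base.User") // include: "base", ident: "User"
//	ParseIdent(cur, includes, "User")      // include: "",     ident: "User"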
// IncludeNames returns include names from include ast nodes
func IncludeNames(cur uri.URI, includes []*parser.Include) (includeNames []string) {
for _, inc := range includes {
if inc.Path != nil && inc.Path.Value != nil {
path := inc.Path.Value.Text
u := IncludeURI(cur, path)
includeName := GetIncludeName(u)
includeNames = append(includeNames, includeName)
}
}
return includeNames
}
package mapper
import (
"bytes"
"errors"
"fmt"
"sync"
"unicode/utf8"
"github.com/joyme123/thrift-ls/lsp/types"
"github.com/joyme123/thrift-ls/parser"
"go.lsp.dev/uri"
)
type Mapper struct {
fileURI uri.URI
content []byte
lineInit sync.Once
lineStart []int // line start 0-based byte offset. lsp: 0-based, parser: 1-based
nonASCII bool
}
// NewMapper creates a Mapper for the given file uri and content.
func NewMapper(fileURI uri.URI, content []byte) *Mapper {
return &Mapper{
fileURI: fileURI,
content: content,
}
}
func (m *Mapper) initLineStart() {
m.lineInit.Do(func() {
nlines := bytes.Count(m.content, []byte("\n"))
m.lineStart = make([]int, 1, nlines+1) // initially []int{0}
for offset, b := range m.content {
if b == '\n' {
m.lineStart = append(m.lineStart, offset+1)
}
if b >= utf8.RuneSelf {
m.nonASCII = true
}
}
})
}
func (m *Mapper) GetLSPEndPosition() types.Position {
m.initLineStart()
lastLineStart := m.lineStart[len(m.lineStart)-1]
lastLine := m.content[lastLineStart:]
utf16Len := utf16Count(lastLine)
return types.Position{
Line: uint32(len(m.lineStart)),
Character: uint32(utf16Len) - 1,
}
}
// LSPPosToParserPosition converts a utf16-based LSP position to a rune-based parser position.
func (m *Mapper) LSPPosToParserPosition(pos types.Position) (parser.Position, error) {
m.initLineStart()
line := int(pos.Line) + 1
if line > len(m.lineStart) {
return parser.InvalidPosition, fmt.Errorf("invalid position line, request line: %d, total line: %d", line, len(m.lineStart))
}
if !m.nonASCII {
col := int(pos.Character) + 1
offset := m.lineStart[pos.Line] + int(pos.Character)
if offset > len(m.content) {
return parser.InvalidPosition, fmt.Errorf("invalid position offset: %d, total content: %d, %s", offset, len(m.content), string(m.content))
}
var lineLength int
if int(pos.Line+1) >= len(m.lineStart) {
lineLength = len(m.content) - m.lineStart[pos.Line]
} else {
lineLength = m.lineStart[pos.Line+1] - m.lineStart[pos.Line]
}
if col > lineLength+1 { // if line length is 0, col is 1 means col is at end of line
return parser.InvalidPosition, fmt.Errorf("invalid position column: %d, line length: %d, %s", col, lineLength, string(m.content))
}
return parser.Position{
Line: line,
Col: col,
Offset: offset,
}, nil
}
lineStart := m.lineStart[pos.Line]
lineEnd := 0
if int(pos.Line) == len(m.lineStart)-1 {
lineEnd = len(m.content)
} else {
lineEnd = m.lineStart[pos.Line+1]
}
lineBytes := m.content[lineStart:lineEnd]
utf16Col := -1
bytesCol := -1
for len(lineBytes) > 0 {
if utf16Col >= int(pos.Character) {
break
}
utf16Col++
if lineBytes[0] < utf8.RuneSelf {
lineBytes = lineBytes[1:]
bytesCol++
continue
}
r, size := utf8.DecodeRune(lineBytes)
if r >= 0x10000 {
utf16Col++
}
lineBytes = lineBytes[size:]
bytesCol += size
}
runeLen := utf8.RuneCount(m.content[lineStart : lineStart+bytesCol+1])
offset := lineStart + bytesCol
if offset >= len(m.content) {
return parser.InvalidPosition, errors.New("invalid position character")
}
/*
if offset >= m.lineStart[pos.Line+1] {
return parser.InvalidPosition, errors.New("invalid position character")
}
*/
return parser.Position{
Line: line,
Col: runeLen,
Offset: lineStart + bytesCol,
}, nil
}
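// Illustrative sketch (not part of the original source): for a line containing
// `a😀b`, the rune '😀' occupies two utf16 code units, so the LSP position
// {Line: 0, Character: 3} (pointing at 'b') maps to the rune-based parser
// position {Line: 1, Col: 3} rather than Col 4.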
func utf16Count(contents []byte) int {
utf16Len := 0
for len(contents) > 0 {
utf16Len++
r, size := utf8.DecodeRune(contents)
if r >= 0x10000 {
utf16Len++
}
contents = contents[size:]
}
return utf16Len
}
package memoize
import (
"context"
"fmt"
"runtime/trace"
"sync"
"go.lsp.dev/pkg/xcontext"
)
// Function is the type of a function that can be memoized.
//
// If the arg is a RefCounted, its Acquire/Release operations are called.
//
// The argument must not materially affect the result of the function
// in ways that are not captured by the promise's key, since if
// Promise.Get is called twice concurrently, with the same (implicit)
// key but different arguments, the Function is called only once but
// its result must be suitable for both callers.
//
// The main purpose of the argument is to avoid the Function closure
// needing to retain large objects (in practice: the snapshot) in
// memory that can be supplied at call time by any caller.
type Function func(ctx context.Context, arg interface{}) interface{}
// A RefCounted is a value whose functional lifetime is determined by
// reference counting.
//
// Its Acquire method is called before the Function is invoked, and
// the corresponding release is called when the Function returns.
// Usually both events happen within a single call to Get, so Get
// would be fine with a "borrowed" reference, but if the context is
// cancelled, Get may return before the Function is complete, causing
// the argument to escape, and potential premature destruction of the
// value. For a reference-counted type, this requires a pair of
// increment/decrement operations to extend its life.
type RefCounted interface {
// Acquire prevents the value from being destroyed until the
// returned function is called.
Acquire() func()
}
// A Promise represents the future result of a call to a function.
type Promise struct {
debug string // for observability
// refcount is the reference count in the containing Store, used by
// Store.Promise. It is guarded by Store.promisesMu on the containing Store.
refcount int32
mu sync.Mutex
// A Promise starts out IDLE, waiting for something to demand
// its evaluation. It then transitions into RUNNING state.
//
// While RUNNING, waiters tracks the number of Get calls
// waiting for a result, and the done channel is used to
// notify waiters of the next state transition. Once
// evaluation finishes, value is set, state changes to
// COMPLETED, and done is closed, unblocking waiters.
//
// Alternatively, as Get calls are cancelled, they decrement
// waiters. If it drops to zero, the inner context is
// cancelled, computation is abandoned, and state resets to
// IDLE to start the process over again.
state state
// done is set in running state, and closed when exiting it.
done chan struct{}
// cancel is set in running state. It cancels computation.
cancel context.CancelFunc
// waiters is the number of Gets outstanding.
waiters uint
// the function that will be used to populate the value
function Function
// value is set in completed state.
value interface{}
}
// NewPromise returns a promise for the future result of calling the
// specified function.
//
// The debug string is used to classify promises in logs and metrics.
// It should be drawn from a small set.
func NewPromise(debug string, function Function) *Promise {
if function == nil {
panic("nil function")
}
return &Promise{
debug: debug,
function: function,
}
}
type state int
const (
stateIdle = iota // newly constructed, or last waiter was cancelled
stateRunning // start was called and not cancelled
stateCompleted // function call ran to completion
)
// Cached returns the value associated with a promise.
//
// It will never cause the value to be generated.
// It will return the cached value, if present.
func (p *Promise) Cached() interface{} {
p.mu.Lock()
defer p.mu.Unlock()
if p.state == stateCompleted {
return p.value
}
return nil
}
// Get returns the value associated with a promise.
//
// All calls to Promise.Get on a given promise return the
// same result but the function is called (to completion) at most once.
//
// If the value is not yet ready, the underlying function will be invoked.
//
// If ctx is cancelled, Get returns (nil, Canceled).
// If all concurrent calls to Get are cancelled, the context provided
// to the function is cancelled. A later call to Get may attempt to
// call the function again.
func (p *Promise) Get(ctx context.Context, arg interface{}) (interface{}, error) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
p.mu.Lock()
switch p.state {
case stateIdle:
return p.run(ctx, arg)
case stateRunning:
return p.wait(ctx)
case stateCompleted:
defer p.mu.Unlock()
return p.value, nil
default:
panic("unknown state")
}
}
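// Illustrative usage (sketch, not part of the original source; expensiveCompute
// is a hypothetical helper): concurrent Get calls share one evaluation.
//
//	p := NewPromise("example", func(ctx context.Context, arg interface{}) interface{} {
//		return expensiveCompute(arg)
//	})
//	v, err := p.Get(ctx, snapshot) // first caller runs the function
//	w, _ := p.Get(ctx, snapshot)   // later callers reuse the cached value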
// run starts p.function and returns the result. p.mu must be locked.
func (p *Promise) run(ctx context.Context, arg interface{}) (interface{}, error) {
childCtx, cancel := context.WithCancel(xcontext.Detach(ctx))
p.cancel = cancel
p.state = stateRunning
p.done = make(chan struct{})
function := p.function // Read under the lock
// Make sure that the argument isn't destroyed while we're running in it.
release := func() {}
if rc, ok := arg.(RefCounted); ok {
release = rc.Acquire()
}
go func() {
trace.WithRegion(childCtx, fmt.Sprintf("Promise.run %s", p.debug), func() {
defer release()
// Just in case the function does something expensive without checking
// the context, double-check we're still alive.
if childCtx.Err() != nil {
return
}
v := function(childCtx, arg)
if childCtx.Err() != nil {
return
}
p.mu.Lock()
defer p.mu.Unlock()
// It's theoretically possible that the promise has been cancelled out
// of the run that started us, and then started running again since we
// checked childCtx above. Even so, that should be harmless, since each
// run should produce the same results.
if p.state != stateRunning {
return
}
p.value = v
p.function = nil // aid GC
p.state = stateCompleted
close(p.done)
})
}()
return p.wait(ctx)
}
// wait waits for the value to be computed, or ctx to be cancelled. p.mu must be locked.
func (p *Promise) wait(ctx context.Context) (interface{}, error) {
p.waiters++
done := p.done
p.mu.Unlock()
select {
case <-done:
p.mu.Lock()
defer p.mu.Unlock()
if p.state == stateCompleted {
return p.value, nil
}
return nil, nil
case <-ctx.Done():
p.mu.Lock()
defer p.mu.Unlock()
p.waiters--
if p.waiters == 0 && p.state == stateRunning {
p.cancel()
close(p.done)
p.state = stateIdle
p.done = nil
p.cancel = nil
}
return nil, ctx.Err()
}
}
package memoize
import (
"reflect"
"sync"
"sync/atomic"
)
// An EvictionPolicy controls the eviction behavior of keys in a Store when
// they no longer have any references.
type EvictionPolicy int
const (
// ImmediatelyEvict evicts keys as soon as they no longer have references.
ImmediatelyEvict EvictionPolicy = iota
// NeverEvict does not evict keys.
NeverEvict
)
type Store struct {
evictionPolicy EvictionPolicy
promisesMu sync.Mutex
promises map[interface{}]*Promise
}
// Promise returns a reference-counted promise for the future result of
// calling the specified function.
//
// Calls to Promise with the same key return the same promise, incrementing its
// reference count. The caller must call the returned function to decrement
// the promise's reference count when it is no longer needed. The returned
// function must not be called more than once.
//
// Once the last reference has been released, the promise is removed from the
// store.
func (store *Store) Promise(key interface{}, function Function) (*Promise, func()) {
store.promisesMu.Lock()
p, ok := store.promises[key]
if !ok {
p = NewPromise(reflect.TypeOf(key).String(), function)
if store.promises == nil {
store.promises = map[interface{}]*Promise{}
}
store.promises[key] = p
}
p.refcount++
store.promisesMu.Unlock()
var released int32
release := func() {
if !atomic.CompareAndSwapInt32(&released, 0, 1) {
panic("release called more than once")
}
store.promisesMu.Lock()
p.refcount--
if p.refcount == 0 && store.evictionPolicy != NeverEvict {
// Inv: if p.refcount > 0, then store.promises[key] == p.
delete(store.promises, key)
}
store.promisesMu.Unlock()
}
return p, release
}
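// Illustrative usage (sketch, not part of the original source; parseKey and
// parseFile are hypothetical): release must be called exactly once, after which
// the promise may be evicted from the store.
//
//	p, release := store.Promise(parseKey{uri: file}, parseFile)
//	defer release()
//	v, err := p.Get(ctx, snapshot)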
// Stats returns the number of each type of key in the store.
func (s *Store) Stats() map[reflect.Type]int {
result := map[reflect.Type]int{}
s.promisesMu.Lock()
defer s.promisesMu.Unlock()
for k := range s.promises {
result[reflect.TypeOf(k)]++
}
return result
}
// DebugOnlyIterate iterates through the store and, for each completed
// promise, calls f(k, v) for the map key k and function result v. It
// should only be used for debugging purposes.
func (s *Store) DebugOnlyIterate(f func(k, v interface{})) {
s.promisesMu.Lock()
defer s.promisesMu.Unlock()
for k, p := range s.promises {
if v := p.Cached(); v != nil {
f(k, v)
}
}
}
package lsp
import (
"context"
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/lsp/codejump"
)
func (s *Server) prepareRename(ctx context.Context, params *protocol.PrepareRenameParams) (*protocol.Range, error) {
file := params.TextDocument.URI
view, err := s.session.ViewOf(file)
if err != nil {
return nil, err
}
ss, release := view.Snapshot()
defer release()
return codejump.PrepareRename(ctx, ss, params.TextDocument.URI, params.Position)
}
func (s *Server) rename(ctx context.Context, params *protocol.RenameParams) (*protocol.WorkspaceEdit, error) {
file := params.TextDocument.URI
view, err := s.session.ViewOf(file)
if err != nil {
return nil, err
}
ss, release := view.Snapshot()
defer release()
return codejump.Rename(ctx, ss, params.TextDocument.URI, params.Position, params.NewName)
}
package lsp
import (
"context"
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/lsp/cache"
log "github.com/sirupsen/logrus"
)
type Server struct {
cache *cache.Cache
session *cache.Session
client protocol.Client
}
func NewServer(c *cache.Cache, client protocol.Client) *Server {
return &Server{
cache: c,
session: cache.NewSession(c),
client: client,
}
}
func (s *Server) Initialize(ctx context.Context, params *protocol.InitializeParams) (result *protocol.InitializeResult, err error) {
log.Debugln("------------Initialize called--------------")
defer log.Debugln("-----------Initialize finish--------------")
return s.initialize(ctx, params)
}
func (s *Server) Initialized(ctx context.Context, params *protocol.InitializedParams) (err error) {
return nil
}
func (s *Server) Shutdown(ctx context.Context) (err error) {
return nil
}
func (s *Server) Exit(ctx context.Context) (err error) {
return nil
}
func (s *Server) WorkDoneProgressCancel(ctx context.Context, params *protocol.WorkDoneProgressCancelParams) (err error) {
return nil
}
func (s *Server) LogTrace(ctx context.Context, params *protocol.LogTraceParams) (err error) {
return nil
}
func (s *Server) SetTrace(ctx context.Context, params *protocol.SetTraceParams) (err error) {
return nil
}
func (s *Server) CodeAction(ctx context.Context, params *protocol.CodeActionParams) (result []protocol.CodeAction, err error) {
return nil, nil
}
func (s *Server) CodeLens(ctx context.Context, params *protocol.CodeLensParams) (result []protocol.CodeLens, err error) {
return nil, nil
}
func (s *Server) CodeLensResolve(ctx context.Context, params *protocol.CodeLens) (result *protocol.CodeLens, err error) {
return nil, nil
}
func (s *Server) ColorPresentation(ctx context.Context, params *protocol.ColorPresentationParams) (result []protocol.ColorPresentation, err error) {
return nil, nil
}
func (s *Server) Completion(ctx context.Context, params *protocol.CompletionParams) (result *protocol.CompletionList, err error) {
log.Debugln("------------Completion called--------------")
defer log.Debugln("-----------Completion finish--------------")
return s.completion(ctx, params)
}
func (s *Server) CompletionResolve(ctx context.Context, params *protocol.CompletionItem) (result *protocol.CompletionItem, err error) {
return nil, nil
}
func (s *Server) Declaration(ctx context.Context, params *protocol.DeclarationParams) (result []protocol.Location, err error) {
return nil, nil
}
func (s *Server) Definition(ctx context.Context, params *protocol.DefinitionParams) (result []protocol.Location, err error) {
log.Debugln("-------------------Definition called-----------------")
defer log.Debugln("-------------------Definition finish-----------------")
return s.definition(ctx, params)
}
func (s *Server) DidChange(ctx context.Context, params *protocol.DidChangeTextDocumentParams) (err error) {
log.Debugln("-----------DidChange called-----------")
defer log.Debugln("-----------DidChange finish-----------")
return s.didChange(ctx, params)
}
func (s *Server) DidChangeConfiguration(ctx context.Context, params *protocol.DidChangeConfigurationParams) (err error) {
return nil
}
func (s *Server) DidChangeWatchedFiles(ctx context.Context, params *protocol.DidChangeWatchedFilesParams) (err error) {
return nil
}
func (s *Server) DidChangeWorkspaceFolders(ctx context.Context, params *protocol.DidChangeWorkspaceFoldersParams) (err error) {
return nil
}
func (s *Server) DidClose(ctx context.Context, params *protocol.DidCloseTextDocumentParams) (err error) {
return nil
}
func (s *Server) DidOpen(ctx context.Context, params *protocol.DidOpenTextDocumentParams) (err error) {
log.Debugln("-----------DidOpen called-----------")
defer log.Debugln("-----------DidOpen finish-----------")
return s.didOpen(ctx, params)
}
func (s *Server) DidSave(ctx context.Context, params *protocol.DidSaveTextDocumentParams) (err error) {
return nil
}
func (s *Server) DocumentColor(ctx context.Context, params *protocol.DocumentColorParams) (result []protocol.ColorInformation, err error) {
return nil, nil
}
func (s *Server) DocumentHighlight(ctx context.Context, params *protocol.DocumentHighlightParams) (result []protocol.DocumentHighlight, err error) {
return nil, nil
}
func (s *Server) DocumentLink(ctx context.Context, params *protocol.DocumentLinkParams) (result []protocol.DocumentLink, err error) {
return nil, nil
}
func (s *Server) DocumentLinkResolve(ctx context.Context, params *protocol.DocumentLink) (result *protocol.DocumentLink, err error) {
return nil, nil
}
func (s *Server) DocumentSymbol(ctx context.Context, params *protocol.DocumentSymbolParams) (result []interface{}, err error) {
log.Debugln("-----------DocumentSymbol called-----------")
defer log.Debugln("-----------DocumentSymbol finish-----------")
return s.documentSymbol(ctx, params)
}
func (s *Server) ExecuteCommand(ctx context.Context, params *protocol.ExecuteCommandParams) (result interface{}, err error) {
return nil, nil
}
func (s *Server) FoldingRanges(ctx context.Context, params *protocol.FoldingRangeParams) (result []protocol.FoldingRange, err error) {
return nil, nil
}
func (s *Server) Formatting(ctx context.Context, params *protocol.DocumentFormattingParams) (result []protocol.TextEdit, err error) {
log.Debugln("-----------Formatting called-----------")
defer log.Debugln("-----------Formatting finish-----------")
return s.formatting(ctx, params)
}
func (s *Server) Hover(ctx context.Context, params *protocol.HoverParams) (result *protocol.Hover, err error) {
log.Debugln("------------hover called----------------")
defer log.Debugln("------------hover finish------------")
return s.hover(ctx, params)
}
func (s *Server) Implementation(ctx context.Context, params *protocol.ImplementationParams) (result []protocol.Location, err error) {
return nil, nil
}
func (s *Server) OnTypeFormatting(ctx context.Context, params *protocol.DocumentOnTypeFormattingParams) (result []protocol.TextEdit, err error) {
return nil, nil
}
func (s *Server) PrepareRename(ctx context.Context, params *protocol.PrepareRenameParams) (result *protocol.Range, err error) {
log.Debugln("--------------------PrepareRename called----------------------")
defer log.Debugln("--------------------PrepareRename finish----------------------")
return s.prepareRename(ctx, params)
}
func (s *Server) RangeFormatting(ctx context.Context, params *protocol.DocumentRangeFormattingParams) (result []protocol.TextEdit, err error) {
return nil, nil
}
func (s *Server) References(ctx context.Context, params *protocol.ReferenceParams) (result []protocol.Location, err error) {
log.Debugln("--------------------References called----------------------")
defer log.Debugln("--------------------References finish----------------------")
return s.references(ctx, params)
}
func (s *Server) Rename(ctx context.Context, params *protocol.RenameParams) (result *protocol.WorkspaceEdit, err error) {
log.Debugln("--------------------Rename called----------------------")
defer log.Debugln("--------------------Rename finish----------------------")
return s.rename(ctx, params)
}
func (s *Server) SignatureHelp(ctx context.Context, params *protocol.SignatureHelpParams) (result *protocol.SignatureHelp, err error) {
return nil, nil
}
func (s *Server) Symbols(ctx context.Context, params *protocol.WorkspaceSymbolParams) (result []protocol.SymbolInformation, err error) {
return nil, nil
}
func (s *Server) TypeDefinition(ctx context.Context, params *protocol.TypeDefinitionParams) (result []protocol.Location, err error) {
log.Debugln("--------------------TypeDefinition called----------------------")
defer log.Debugln("--------------------TypeDefinition finish----------------------")
return s.typeDefinition(ctx, params)
}
func (s *Server) WillSave(ctx context.Context, params *protocol.WillSaveTextDocumentParams) (err error) {
return nil
}
func (s *Server) WillSaveWaitUntil(ctx context.Context, params *protocol.WillSaveTextDocumentParams) (result []protocol.TextEdit, err error) {
return nil, nil
}
func (s *Server) ShowDocument(ctx context.Context, params *protocol.ShowDocumentParams) (result *protocol.ShowDocumentResult, err error) {
return nil, nil
}
func (s *Server) WillCreateFiles(ctx context.Context, params *protocol.CreateFilesParams) (result *protocol.WorkspaceEdit, err error) {
return nil, nil
}
func (s *Server) DidCreateFiles(ctx context.Context, params *protocol.CreateFilesParams) (err error) {
return nil
}
func (s *Server) WillRenameFiles(ctx context.Context, params *protocol.RenameFilesParams) (result *protocol.WorkspaceEdit, err error) {
return nil, nil
}
func (s *Server) DidRenameFiles(ctx context.Context, params *protocol.RenameFilesParams) (err error) {
return nil
}
func (s *Server) WillDeleteFiles(ctx context.Context, params *protocol.DeleteFilesParams) (result *protocol.WorkspaceEdit, err error) {
return nil, nil
}
func (s *Server) DidDeleteFiles(ctx context.Context, params *protocol.DeleteFilesParams) (err error) {
return nil
}
func (s *Server) CodeLensRefresh(ctx context.Context) (err error) {
return nil
}
func (s *Server) PrepareCallHierarchy(ctx context.Context, params *protocol.CallHierarchyPrepareParams) (result []protocol.CallHierarchyItem, err error) {
return nil, nil
}
func (s *Server) IncomingCalls(ctx context.Context, params *protocol.CallHierarchyIncomingCallsParams) (result []protocol.CallHierarchyIncomingCall, err error) {
return nil, nil
}
func (s *Server) OutgoingCalls(ctx context.Context, params *protocol.CallHierarchyOutgoingCallsParams) (result []protocol.CallHierarchyOutgoingCall, err error) {
return nil, nil
}
func (s *Server) SemanticTokensFull(ctx context.Context, params *protocol.SemanticTokensParams) (result *protocol.SemanticTokens, err error) {
return nil, nil
}
func (s *Server) SemanticTokensFullDelta(ctx context.Context, params *protocol.SemanticTokensDeltaParams) (result interface{}, err error) {
return nil, nil
}
func (s *Server) SemanticTokensRange(ctx context.Context, params *protocol.SemanticTokensRangeParams) (result *protocol.SemanticTokens, err error) {
return nil, nil
}
func (s *Server) SemanticTokensRefresh(ctx context.Context) (err error) {
return nil
}
func (s *Server) LinkedEditingRange(ctx context.Context, params *protocol.LinkedEditingRangeParams) (result *protocol.LinkedEditingRanges, err error) {
return nil, nil
}
func (s *Server) Moniker(ctx context.Context, params *protocol.MonikerParams) (result []protocol.Moniker, err error) {
return nil, nil
}
// Request handles all non-standard requests
func (s *Server) Request(ctx context.Context, method string, params interface{}) (result interface{}, err error) {
return nil, nil
}
package lsp
import (
"context"
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/lsp/cache"
"github.com/joyme123/thrift-ls/lsp/memoize"
"go.lsp.dev/jsonrpc2"
"go.lsp.dev/pkg/event"
"go.uber.org/zap"
)
type StreamServer struct {
logger *zap.Logger
cache *cache.Cache
}
func NewStreamServer() *StreamServer {
logger, _ := zap.NewProduction()
store := &memoize.Store{}
return &StreamServer{
cache: cache.New(store),
logger: logger,
}
}
func (s *StreamServer) ServeStream(ctx context.Context, conn jsonrpc2.Conn) error {
client := protocol.ClientDispatcher(conn, s.logger)
server := NewServer(s.cache, client)
// Clients may or may not send a shutdown message. Make sure the server is
// shut down.
// TODO(rFindley): this shutdown should perhaps be on a disconnected context.
defer func() {
if err := server.Shutdown(ctx); err != nil {
event.Error(ctx, "error shutting down", err)
}
}()
ctx = protocol.WithClient(ctx, client)
conn.Go(ctx,
DebugHandler(
protocol.Handlers(
protocol.ServerHandler(server, jsonrpc2.MethodNotFoundHandler))))
<-conn.Done()
return conn.Err()
}
package lsp
import (
"context"
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/lsp/symbols"
)
func (s *Server) documentSymbol(ctx context.Context, params *protocol.DocumentSymbolParams) (result []interface{}, err error) {
file := params.TextDocument.URI
view, err := s.session.ViewOf(file)
if err != nil {
return nil, err
}
ss, release := view.Snapshot()
defer release()
symbols := symbols.DocumentSymbols(ctx, ss, file)
for i := range symbols {
result = append(result, symbols[i])
}
return
}
package symbols
import (
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/lsp/lsputils"
"github.com/joyme123/thrift-ls/parser"
)
func ConstSymbol(cst *parser.Const) *protocol.DocumentSymbol {
if cst.IsBadNode() || cst.ChildrenBadNode() {
return nil
}
res := &protocol.DocumentSymbol{
Name: cst.Name.Name.Text,
Kind: protocol.SymbolKindConstant,
Range: lsputils.ASTNodeToRange(cst.Name.Name),
SelectionRange: lsputils.ASTNodeToRange(cst.Name.Name),
}
return res
}
package symbols
import (
"context"
"errors"
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/lsp/cache"
"go.lsp.dev/uri"
)
func DocumentSymbols(ctx context.Context, ss *cache.Snapshot, file uri.URI) []*protocol.DocumentSymbol {
res := make([]*protocol.DocumentSymbol, 0)
pf, err := ss.Parse(ctx, file)
if err != nil {
return res
}
if pf.AST() == nil {
return res
}
doc := pf.AST()
for i := range doc.Typedefs {
child := TypedefSymbol(doc.Typedefs[i])
if child != nil {
res = append(res, child)
}
}
for i := range doc.Consts {
child := ConstSymbol(doc.Consts[i])
if child != nil {
res = append(res, child)
}
}
for i := range doc.Structs {
child := StructSymbol(doc.Structs[i])
if child != nil {
res = append(res, child)
}
}
for i := range doc.Unions {
child := UnionSymbol(doc.Unions[i])
if child != nil {
res = append(res, child)
}
}
for i := range doc.Exceptions {
child := ExceptionSymbol(doc.Exceptions[i])
if child != nil {
res = append(res, child)
}
}
for i := range doc.Services {
child := ServiceSymbol(doc.Services[i])
if child != nil {
res = append(res, child)
}
}
return res
}
package symbols
import (
"strconv"
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/lsp/lsputils"
"github.com/joyme123/thrift-ls/parser"
)
func EnumSymbol(enum *parser.Enum) *protocol.DocumentSymbol {
if enum.IsBadNode() || enum.ChildrenBadNode() {
return nil
}
res := &protocol.DocumentSymbol{
Name: enum.Name.Name.Text,
Detail: "Enum",
Kind: protocol.SymbolKindEnum,
Range: lsputils.ASTNodeToRange(enum.Name.Name),
SelectionRange: lsputils.ASTNodeToRange(enum.Name.Name),
}
for i := range enum.Values {
child := EnumValueSymbol(enum.Values[i])
if child == nil {
continue
}
res.Children = append(res.Children, *child)
}
return res
}
func EnumValueSymbol(v *parser.EnumValue) *protocol.DocumentSymbol {
if v.IsBadNode() || v.ChildrenBadNode() {
return nil
}
res := &protocol.DocumentSymbol{
Name: v.Name.Name.Text,
Detail: strconv.FormatInt(v.Value, 10),
Kind: protocol.SymbolKindNumber,
Range: lsputils.ASTNodeToRange(v.Name.Name),
SelectionRange: lsputils.ASTNodeToRange(v.Name.Name),
}
return res
}
package symbols
import (
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/format"
"github.com/joyme123/thrift-ls/lsp/lsputils"
"github.com/joyme123/thrift-ls/parser"
)
func FieldSymbol(field *parser.Field) *protocol.DocumentSymbol {
if field.IsBadNode() || field.ChildrenBadNode() {
return nil
}
detail := ""
if field.RequiredKeyword != nil {
detail = field.RequiredKeyword.Literal.Text + " "
}
detail += format.MustFormatFieldType(field.FieldType)
res := &protocol.DocumentSymbol{
Name: field.Identifier.Name.Text,
Detail: detail,
Kind: protocol.SymbolKindField,
Range: lsputils.ASTNodeToRange(field.Identifier.Name),
SelectionRange: lsputils.ASTNodeToRange(field.Identifier.Name),
}
return res
}
package symbols
import (
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/lsp/lsputils"
"github.com/joyme123/thrift-ls/parser"
)
func ServiceSymbol(svc *parser.Service) *protocol.DocumentSymbol {
if svc.IsBadNode() || svc.ChildrenBadNode() {
return nil
}
res := &protocol.DocumentSymbol{
Name: svc.Name.Name.Text,
Kind: protocol.SymbolKindInterface,
Range: lsputils.ASTNodeToRange(svc.Name.Name),
SelectionRange: lsputils.ASTNodeToRange(svc.Name.Name),
}
for i := range svc.Functions {
child := FunctionSymbol(svc.Functions[i])
if child != nil {
res.Children = append(res.Children, *child)
}
}
return res
}
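// FunctionSymbol converts a service function into an LSP document symbol.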
func FunctionSymbol(fn *parser.Function) *protocol.DocumentSymbol {
if fn.IsBadNode() || fn.ChildrenBadNode() {
return nil
}
res := &protocol.DocumentSymbol{
Name: fn.Name.Name.Text,
Kind: protocol.SymbolKindFunction,
Range: lsputils.ASTNodeToRange(fn.Name.Name),
SelectionRange: lsputils.ASTNodeToRange(fn.Name.Name),
}
return res
}
package symbols
import (
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/lsp/lsputils"
"github.com/joyme123/thrift-ls/parser"
)
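// StructSymbol converts a struct definition into an LSP document symbol
// with one child symbol per field.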
func StructSymbol(st *parser.Struct) *protocol.DocumentSymbol {
if st.IsBadNode() || st.ChildrenBadNode() {
return nil
}
res := &protocol.DocumentSymbol{
Name: st.Identifier.Name.Text,
Detail: "Struct",
Kind: protocol.SymbolKindStruct,
Range: lsputils.ASTNodeToRange(st.Identifier.Name),
SelectionRange: lsputils.ASTNodeToRange(st.Identifier.Name),
}
for i := range st.Fields {
child := FieldSymbol(st.Fields[i])
if child == nil {
continue
}
res.Children = append(res.Children, *child)
}
return res
}
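// UnionSymbol converts a union definition into an LSP document symbol
// with one child symbol per field.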
func UnionSymbol(un *parser.Union) *protocol.DocumentSymbol {
if un.IsBadNode() || un.ChildrenBadNode() {
return nil
}
res := &protocol.DocumentSymbol{
Name: un.Name.Name.Text,
Detail: "Union",
Kind: protocol.SymbolKindStruct,
Range: lsputils.ASTNodeToRange(un.Name.Name),
SelectionRange: lsputils.ASTNodeToRange(un.Name.Name),
}
for i := range un.Fields {
child := FieldSymbol(un.Fields[i])
if child == nil {
continue
}
res.Children = append(res.Children, *child)
}
return res
}
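// ExceptionSymbol converts an exception definition into an LSP document symbol
// with one child symbol per field.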
func ExceptionSymbol(ex *parser.Exception) *protocol.DocumentSymbol {
if ex.IsBadNode() || ex.ChildrenBadNode() {
return nil
}
res := &protocol.DocumentSymbol{
Name: ex.Name.Name.Text,
Detail: "Exception",
Kind: protocol.SymbolKindStruct,
Range: lsputils.ASTNodeToRange(ex.Name.Name),
SelectionRange: lsputils.ASTNodeToRange(ex.Name.Name),
}
for i := range ex.Fields {
child := FieldSymbol(ex.Fields[i])
if child == nil {
continue
}
res.Children = append(res.Children, *child)
}
return res
}
package symbols
import (
"github.com/joyme123/protocol"
"github.com/joyme123/thrift-ls/format"
"github.com/joyme123/thrift-ls/lsp/lsputils"
"github.com/joyme123/thrift-ls/parser"
)
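// TypedefSymbol converts a typedef into an LSP document symbol, using the
// formatted original type as the detail text.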
func TypedefSymbol(td *parser.Typedef) *protocol.DocumentSymbol {
if td.IsBadNode() || td.ChildrenBadNode() {
return nil
}
res := &protocol.DocumentSymbol{
Name: td.Alias.Name.Text,
Detail: format.MustFormatFieldType(td.T),
Kind: protocol.SymbolKindTypeParameter,
Range: lsputils.ASTNodeToRange(td.Alias.Name),
SelectionRange: lsputils.ASTNodeToRange(td.Alias.Name),
}
return res
}
package main
import (
"context"
"errors"
"flag"
"fmt"
"io"
"math/rand"
"os"
"path/filepath"
"time"
"github.com/joyme123/thrift-ls/format"
tlog "github.com/joyme123/thrift-ls/log"
"github.com/joyme123/thrift-ls/lsp"
"github.com/joyme123/thrift-ls/parser"
"github.com/joyme123/thrift-ls/utils/diff"
"go.lsp.dev/jsonrpc2"
"go.lsp.dev/pkg/fakenet"
"gopkg.in/yaml.v2"
)
type Options struct {
LogLevel int `yaml:"logLevel"` // 1: fatal, 2: error, 3: warn, 4: info, 5: debug, 6: trace
}
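// main_format runs thrift-ls as a stand-alone formatter: it parses the file
// given with -f, formats it, and then rewrites the file, prints a diff, or
// prints the result to stdout, depending on the format options.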
func main_format(opt format.Options, file string) error {
if file == "" {
err := errors.New("must specify a thrift file to format")
fmt.Println(err)
return err
}
content, err := os.ReadFile(file)
if err != nil {
fmt.Println(err)
return err
}
thriftFile := filepath.Base(file)
ast, err := parser.Parse(thriftFile, content)
if err != nil {
fmt.Println(err)
return err
}
formatted, err := format.FormatDocumentWithValidation(ast.(*parser.Document), true)
if err != nil {
fmt.Println(err)
return err
}
if opt.Write {
var perms os.FileMode
fileInfo, err := os.Stat(file)
if err != nil {
fmt.Println(err)
return err
}
perms = fileInfo.Mode() // keep the original file's permissions
// overwrite the source file with the formatted output
err = os.WriteFile(file, []byte(formatted), perms)
if err != nil {
fmt.Println(err)
return err
}
} else {
if opt.Diff {
diffLines := diff.Diff("old", content, "new", []byte(formatted))
fmt.Print(string(diffLines))
} else {
fmt.Print(formatted)
}
return nil
}
return nil
}
func main() {
rand.Seed(time.Now().UnixMilli())
formatter := false
formatFile := ""
flag.BoolVar(&formatter, "format", false, "use thrift-ls as a format tool")
flag.StringVar(&formatFile, "f", "", "file path to format")
formatOpts := format.Options{}
formatOpts.SetFlags()
flag.Parse()
formatOpts.InitDefault()
opts := configInit()
tlog.Init(opts.LogLevel)
if formatter {
main_format(formatOpts, formatFile)
return
}
ctx := context.Background()
// server := &lsp.Server{}
// handler := protocol.ServerHandler(server, nil)
//
// streamServer := jsonrpc2.HandlerServer(handler)
// if err := jsonrpc2.ListenAndServe(ctx, "tcp", "127.0.0.1:8000", streamServer, 60*time.Second); err != nil {
// panic(err)
// }
ss := lsp.NewStreamServer()
stream := jsonrpc2.NewStream(fakenet.NewConn("stdio", os.Stdin, os.Stdout))
conn := jsonrpc2.NewConn(stream)
err := ss.ServeStream(ctx, conn)
if errors.Is(err, io.EOF) {
return
}
panic(err)
}
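// configInit loads options from ~/.thriftls/config.yaml (using the temp dir
// when the home directory is unavailable) and lets the -logLevel flag
// override the configured log level; the level defaults to warn (3).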
func configInit() *Options {
opts := &Options{}
logLevel := -1
flag.IntVar(&logLevel, "logLevel", -1, "set log level")
flag.Parse()
dir, err := os.UserHomeDir()
if err != nil {
dir = os.TempDir()
}
dir = dir + "/.thriftls"
configFile := dir + "/config.yaml"
data, err := os.ReadFile(configFile)
if err == nil {
yaml.Unmarshal(data, opts)
}
if logLevel >= 0 {
opts.LogLevel = logLevel // flag can override config file
}
if opts.LogLevel == 0 {
opts.LogLevel = 3
}
return opts
}
package parser
import (
"path"
"strings"
"unicode/utf8"
)
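// Node is the interface implemented by every node of the thrift AST.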
type Node interface {
// position of the first character of this node
Pos() Position
// position of the first character immediately after this node
End() Position
Contains(pos Position) bool
Children() []Node
Type() string
IsBadNode() bool
ChildrenBadNode() bool
// Equals reports whether two nodes have equal ASTs
Equals(node Node) bool
}
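// Document is the root node of a parsed thrift file. It groups headers,
// definitions and trailing comments by kind, and also records them in source
// order in Nodes.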
type Document struct {
Filename string
BadHeaders []*BadHeader
Includes []*Include
CPPIncludes []*CPPInclude
Namespaces []*Namespace
Consts []*Const
Typedefs []*Typedef
Enums []*Enum
Services []*Service
Structs []*Struct
Unions []*Union
Exceptions []*Exception
BadDefinitions []*BadDefinition
Comments []*Comment // Comments at end of doc
Nodes []Node
Location
}
func NewDocument(headers []Header, defs []Definition, comments []*Comment, loc Location) *Document {
doc := &Document{
Location: loc,
}
for _, header := range headers {
switch header.Type() {
case "Include":
doc.Includes = append(doc.Includes, header.(*Include))
case "CPPInclude":
doc.CPPIncludes = append(doc.CPPIncludes, header.(*CPPInclude))
case "Namespace":
doc.Namespaces = append(doc.Namespaces, header.(*Namespace))
case "BadHeader":
doc.BadHeaders = append(doc.BadHeaders, header.(*BadHeader))
}
doc.Nodes = append(doc.Nodes, header)
}
for _, def := range defs {
switch def.Type() {
case "Const":
doc.Consts = append(doc.Consts, def.(*Const))
case "Typedef":
doc.Typedefs = append(doc.Typedefs, def.(*Typedef))
case "Enum":
doc.Enums = append(doc.Enums, def.(*Enum))
case "Service":
doc.Services = append(doc.Services, def.(*Service))
case "Struct":
doc.Structs = append(doc.Structs, def.(*Struct))
case "Union":
doc.Unions = append(doc.Unions, def.(*Union))
case "Exception":
doc.Exceptions = append(doc.Exceptions, def.(*Exception))
case "BadDefinition":
doc.BadDefinitions = append(doc.BadDefinitions, def.(*BadDefinition))
}
doc.Nodes = append(doc.Nodes, def)
}
doc.Comments = comments
for _, comment := range comments {
doc.Nodes = append(doc.Nodes, comment)
}
return doc
}
func (d *Document) Children() []Node {
return d.Nodes
}
func (d *Document) Type() string {
return "Document"
}
func (d *Document) IsBadNode() bool {
return false
}
func (d *Document) ChildrenBadNode() bool {
children := d.Children()
for i := range children {
if children[i].IsBadNode() {
return true
}
if children[i].ChildrenBadNode() {
return true
}
}
return false
}
func (d *Document) Equals(node Node) bool {
doc, ok := node.(*Document)
if !ok {
return false
}
if (d == nil && doc != nil) ||
(d != nil && doc == nil) {
return false
} else if d == nil && doc == nil {
return true
}
if len(doc.Nodes) != len(d.Nodes) {
return false
}
for i := range d.Nodes {
if !d.Nodes[i].Equals(doc.Nodes[i]) {
return false
}
}
return true
}
type Header interface {
Type() string
SetComments(comments []*Comment, endLineComments []*Comment)
SetLocation(loc Location)
Node
}
type BadHeader struct {
BadNode bool
Location
}
func NewBadHeader(loc Location) *BadHeader {
return &BadHeader{
BadNode: true,
Location: loc,
}
}
func (h *BadHeader) Type() string {
return "BadHeader"
}
func (h *BadHeader) Children() []Node {
return nil
}
func (h *BadHeader) IsBadNode() bool {
return true
}
func (h *BadHeader) ChildrenBadNode() bool {
return false
}
func (h *BadHeader) SetComments([]*Comment, []*Comment) {
}
func (h *BadHeader) Equals(node Node) bool {
hn, ok := node.(*BadHeader)
if !ok {
return false
}
if (h != nil && hn == nil) ||
(h == nil && hn != nil) {
return false
}
return true
}
func (h *BadHeader) SetLocation(loc Location) {
h.Location = loc
}
type KeywordLiteral struct {
Text string
BadNode bool
Location
}
func NewKeywordLiteral(c *current) *KeywordLiteral {
return &KeywordLiteral{
Text: string(c.text),
Location: NewLocationFromCurrent(c),
}
}
func NewBadKeywordLiteral(c *current) *KeywordLiteral {
return &KeywordLiteral{
Text: string(c.text),
BadNode: true,
Location: NewLocationFromCurrent(c),
}
}
func (k *KeywordLiteral) Type() string {
return "KeywordLiteral"
}
func (k *KeywordLiteral) IsBadNode() bool {
return k.BadNode
}
func (k *KeywordLiteral) Children() []Node {
return nil
}
func (k *KeywordLiteral) ChildrenBadNode() bool {
return false
}
func (k *KeywordLiteral) Equals(node Node) bool {
kl, ok := node.(*KeywordLiteral)
if !ok {
return false
}
if (k == nil && kl != nil) ||
(k != nil && kl == nil) {
return false
} else if k == nil && kl == nil {
return true
}
if k.BadNode != kl.BadNode {
return false
}
return k.Text == kl.Text
}
type Keyword struct {
Comments []*Comment
Literal *KeywordLiteral
BadNode bool
Location
}
func NewKeyword(comments []*Comment, literal *KeywordLiteral, loc Location) Keyword {
return Keyword{
Literal: literal,
Comments: comments,
Location: loc,
}
}
func (i *Keyword) Children() []Node {
return nil
}
func (i *Keyword) IsBadNode() bool {
return i.BadNode
}
func (i *Keyword) ChildrenBadNode() bool {
return false
}
func (i *Keyword) Equals(k *Keyword) bool {
if (i == nil && k != nil) ||
(i != nil && k == nil) {
return false
} else if i == nil && k == nil {
return true
}
if i.BadNode != k.BadNode {
return false
}
if !i.Literal.Equals(k.Literal) {
return false
}
if len(i.Comments) != len(k.Comments) {
return false
}
for n := range i.Comments {
if !i.Comments[n].Equals(k.Comments[n]) {
return false
}
}
return true
}
type IncludeKeyword struct {
Keyword
}
func (i *IncludeKeyword) Type() string {
return "IncludeKeyword"
}
func (i *IncludeKeyword) Equals(node Node) bool {
ik, ok := node.(*IncludeKeyword)
if !ok {
return false
}
if (i == nil && ik != nil) ||
(i != nil && ik == nil) {
return false
} else if i == nil && ik == nil {
return true
}
return ik.Keyword.Equals(&i.Keyword)
}
type Include struct {
IncludeKeyword *IncludeKeyword
Path *Literal
Comments []*Comment
EndLineComments []*Comment
BadNode bool
Location
}
func NewInclude(keyword *IncludeKeyword, path *Literal, loc Location) *Include {
return &Include{
IncludeKeyword: keyword,
Location: loc,
Path: path,
}
}
func NewBadInclude(loc Location) *Include {
return &Include{
BadNode: true,
Location: loc,
}
}
func (i *Include) Type() string {
return "Include"
}
func (i *Include) SetComments(comments []*Comment, endLineComments []*Comment) {
i.Comments = comments
i.EndLineComments = endLineComments
}
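// Name returns the included file's base name without its extension, which is
// the prefix other definitions use to refer to symbols from this include.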
func (i *Include) Name() string {
_, file := path.Split(i.Path.Value.Text)
name := strings.TrimSuffix(file, path.Ext(file)) // strip only the extension suffix
return name
}
func (i *Include) Children() []Node {
nodes := []Node{i.IncludeKeyword, i.Path}
for _, com := range i.Comments {
nodes = append(nodes, com)
}
for _, com := range i.EndLineComments {
nodes = append(nodes, com)
}
return nodes
}
func (i *Include) IsBadNode() bool {
return i.BadNode
}
func (i *Include) ChildrenBadNode() bool {
children := i.Children()
for i := range children {
if children[i].IsBadNode() {
return true
}
if children[i].ChildrenBadNode() {
return true
}
}
return false
}
func (i *Include) SetLocation(loc Location) {
i.Location = loc
}
func (i *Include) Equals(node Node) bool {
in, ok := node.(*Include)
if !ok {
return false
}
if (i == nil && in != nil) ||
(i != nil && in == nil) {
return false
} else if i == nil && in == nil {
return true
}
if i.BadNode != in.BadNode {
return false
}
if !i.IncludeKeyword.Equals(in.IncludeKeyword) {
return false
}
if !i.Path.Equals(in.Path) {
return false
}
if len(i.Comments) != len(in.Comments) {
return false
}
for n := range i.Comments {
if !i.Comments[n].Equals(in.Comments[n]) {
return false
}
}
if len(i.EndLineComments) != len(in.EndLineComments) {
return false
}
for n := range i.EndLineComments {
if !i.EndLineComments[n].Equals(in.EndLineComments[n]) {
return false
}
}
return true
}
type CPPIncludeKeyword struct {
Keyword
}
func (c *CPPIncludeKeyword) Type() string {
return "CPPIncludeKeyword"
}
func (c *CPPIncludeKeyword) Equals(node Node) bool {
cn, ok := node.(*CPPIncludeKeyword)
if !ok {
return false
}
if (c == nil && cn != nil) ||
(c != nil && cn == nil) {
return false
} else if c == nil && cn == nil {
return true
}
return c.Keyword.Equals(&cn.Keyword)
}
type CPPInclude struct {
CPPIncludeKeyword *CPPIncludeKeyword
Path *Literal
Comments []*Comment
EndLineComments []*Comment
BadNode bool
Location
}
func NewCPPInclude(keyword *CPPIncludeKeyword, path *Literal, loc Location) *CPPInclude {
return &CPPInclude{
CPPIncludeKeyword: keyword,
Location: loc,
Path: path,
}
}
func NewBadCPPInclude(loc Location) *CPPInclude {
return &CPPInclude{
BadNode: true,
Location: loc,
}
}
func (i *CPPInclude) Type() string {
return "CPPInclude"
}
func (i *CPPInclude) SetComments(comments []*Comment, endLineComments []*Comment) {
i.Comments = comments
i.EndLineComments = endLineComments
}
func (i *CPPInclude) Children() []Node {
res := []Node{i.CPPIncludeKeyword, i.Path}
for _, com := range i.Comments {
res = append(res, com)
}
for _, com := range i.EndLineComments {
res = append(res, com)
}
return res
}
func (i *CPPInclude) IsBadNode() bool {
return i.BadNode
}
func (i *CPPInclude) ChildrenBadNode() bool {
children := i.Children()
for i := range children {
if children[i].IsBadNode() {
return true
}
if children[i].ChildrenBadNode() {
return true
}
}
return false
}
func (i *CPPInclude) SetLocation(loc Location) {
i.Location = loc
}
func (i *CPPInclude) Equals(node Node) bool {
cn, ok := node.(*CPPInclude)
if !ok {
return false
}
if (i == nil && cn != nil) ||
(i != nil && cn == nil) {
return false
} else if i == nil && cn == nil {
return true
}
if i.BadNode != cn.BadNode {
return false
}
if !i.CPPIncludeKeyword.Equals(cn.CPPIncludeKeyword) {
return false
}
if !i.Path.Equals(cn.Path) {
return false
}
if len(i.Comments) != len(cn.Comments) {
return false
}
for n := range i.Comments {
if !i.Comments[n].Equals(cn.Comments[n]) {
return false
}
}
if len(i.EndLineComments) != len(cn.EndLineComments) {
return false
}
for n := range i.EndLineComments {
if !i.EndLineComments[n].Equals(cn.EndLineComments[n]) {
return false
}
}
return true
}
type NamespaceKeyword struct {
Keyword
}
func (n *NamespaceKeyword) Type() string {
return "NamespaceKeyword"
}
func (n *NamespaceKeyword) Equals(node Node) bool {
nn, ok := node.(*NamespaceKeyword)
if !ok {
return false
}
if (n == nil && nn != nil) ||
(n != nil && nn == nil) {
return false
} else if n == nil && nn == nil {
return true
}
return n.Keyword.Equals(&nn.Keyword)
}
type NamespaceScope struct {
Identifier
}
func (ns *NamespaceScope) Equals(node Node) bool {
nn, ok := node.(*NamespaceScope)
if !ok {
return false
}
if (ns == nil && nn != nil) ||
(ns != nil && nn == nil) {
return false
} else if ns == nil && nn == nil {
return true
}
return ns.Identifier.Equals(&nn.Identifier)
}
type Namespace struct {
NamespaceKeyword *NamespaceKeyword
Language *NamespaceScope
Name *Identifier
Annotations *Annotations
Comments []*Comment
EndLineComments []*Comment
BadNode bool
Location
}
func NewNamespace(keyword *NamespaceKeyword, language *NamespaceScope, name *Identifier, annotations *Annotations, loc Location) *Namespace {
return &Namespace{
NamespaceKeyword: keyword,
Language: language,
Name: name,
Annotations: annotations,
Location: loc,
}
}
func NewBadNamespace(loc Location) *Namespace {
return &Namespace{
BadNode: true,
Location: loc,
}
}
func (n *Namespace) Type() string {
return "Namespace"
}
func (n *Namespace) SetComments(comments []*Comment, endLineComments []*Comment) {
n.Comments = comments
n.EndLineComments = endLineComments
}
func (n *Namespace) Children() []Node {
ret := []Node{n.NamespaceKeyword, n.Language, n.Name}
for i := range n.Comments {
ret = append(ret, n.Comments[i])
}
for i := range n.EndLineComments {
ret = append(ret, n.EndLineComments[i])
}
if n.Annotations != nil {
ret = append(ret, n.Annotations)
}
return ret
}
func (n *Namespace) IsBadNode() bool {
return n.BadNode
}
func (n *Namespace) ChildrenBadNode() bool {
children := n.Children()
for i := range children {
if children[i].IsBadNode() {
return true
}
if children[i].ChildrenBadNode() {
return true
}
}
return false
}
func (n *Namespace) SetLocation(loc Location) {
n.Location = loc
}
func (n *Namespace) Equals(node Node) bool {
nn, ok := node.(*Namespace)
if !ok {
return false
}
if (n == nil && nn != nil) ||
(n != nil && nn == nil) {
return false
} else if n == nil && nn == nil {
return true
}
if n.BadNode != nn.BadNode {
return false
}
if !n.NamespaceKeyword.Equals(nn.NamespaceKeyword) {
return false
}
if !n.Language.Equals(nn.Language) {
return false
}
if !n.Name.Equals(nn.Name) {
return false
}
if !n.Annotations.Equals(nn.Annotations) {
return false
}
if len(n.Comments) != len(nn.Comments) {
return false
}
for i := range n.Comments {
if !n.Comments[i].Equals(nn.Comments[i]) {
return false
}
}
if len(n.EndLineComments) != len(nn.EndLineComments) {
return false
}
for i := range n.EndLineComments {
if !n.EndLineComments[i].Equals(nn.EndLineComments[i]) {
return false
}
}
return true
}
type Definition interface {
Node
Type() string
SetComments(comments []*Comment, endLineComments []*Comment)
SetAnnotations(annotations *Annotations)
SetLocation(loc Location)
}
type BadDefinition struct {
BadNode bool
Location
}
func NewBadDefinition(loc Location) *BadDefinition {
return &BadDefinition{
BadNode: true,
Location: loc,
}
}
func (d *BadDefinition) Type() string {
return "Definition"
}
func (d *BadDefinition) Children() []Node {
return nil
}
func (d *BadDefinition) SetComments([]*Comment, []*Comment) {
}
func (d *BadDefinition) SetAnnotations(annos *Annotations) {
}
func (d *BadDefinition) SetLocation(loc Location) {
d.Location = loc
}
func (d *BadDefinition) IsBadNode() bool {
return true
}
func (d *BadDefinition) ChildrenBadNode() bool {
return false
}
func (d *BadDefinition) Equals(node Node) bool {
dn, ok := node.(*BadDefinition)
if !ok {
return false
}
if (d == nil && dn != nil) ||
(d != nil && dn == nil) {
return false
} else if d == nil && dn == nil {
return true
}
if d.BadNode != dn.BadNode {
return false
}
return true
}
type StructKeyword struct {
Keyword
}
func (s *StructKeyword) Type() string {
return "StructKeyword"
}
func (s *StructKeyword) Equals(node Node) bool {
sn, ok := node.(*StructKeyword)
if !ok {
return false
}
if (s == nil && sn != nil) ||
(s != nil && sn == nil) {
return false
} else if s == nil && sn == nil {
return true
}
return s.Keyword.Equals(&sn.Keyword)
}
type LCurKeyword struct {
Keyword
}
func (s *LCurKeyword) Type() string {
return "LCurKeyword"
}
func (s *LCurKeyword) Equals(node Node) bool {
sn, ok := node.(*LCurKeyword)
if !ok {
return false
}
if (s == nil && sn != nil) ||
(s != nil && sn == nil) {
return false
} else if s == nil && sn == nil {
return true
}
return s.Keyword.Equals(&sn.Keyword)
}
type RCurKeyword struct {
Keyword
}
func (s *RCurKeyword) Type() string {
return "RCurKeyword"
}
func (s *RCurKeyword) Equals(node Node) bool {
sn, ok := node.(*RCurKeyword)
if !ok {
return false
}
if (s == nil && sn != nil) ||
(s != nil && sn == nil) {
return false
} else if s == nil && sn == nil {
return true
}
return s.Keyword.Equals(&sn.Keyword)
}
type Struct struct {
StructKeyword *StructKeyword
LCurKeyword *LCurKeyword
RCurKeyword *RCurKeyword
Identifier *Identifier
Fields []*Field
Comments []*Comment
EndLineComments []*Comment
Annotations *Annotations
BadNode bool
Location
}
func NewStruct(structKeyword *StructKeyword, lCurKeyword *LCurKeyword, rCurKeyword *RCurKeyword, identifier *Identifier, fields []*Field, loc Location) *Struct {
return &Struct{
StructKeyword: structKeyword,
LCurKeyword: lCurKeyword,
RCurKeyword: rCurKeyword,
Identifier: identifier,
Fields: fields,
Location: loc,
}
}
func NewBadStruct(loc Location) *Struct {
return &Struct{
BadNode: true,
Location: loc,
}
}
func (s *Struct) Type() string {
return "Struct"
}
func (s *Struct) SetComments(comments []*Comment, endLineComments []*Comment) {
s.Comments = comments
s.EndLineComments = endLineComments
}
func (s *Struct) SetAnnotations(annos *Annotations) {
s.Annotations = annos
}
func (s *Struct) Children() []Node {
nodes := []Node{s.StructKeyword, s.LCurKeyword, s.RCurKeyword, s.Identifier}
for i := range s.Fields {
nodes = append(nodes, s.Fields[i])
}
for i := range s.Comments {
nodes = append(nodes, s.Comments[i])
}
for i := range s.EndLineComments {
nodes = append(nodes, s.EndLineComments[i])
}
if s.Annotations != nil {
nodes = append(nodes, s.Annotations)
}
return nodes
}
func (s *Struct) IsBadNode() bool {
return s.BadNode
}
func (s *Struct) ChildrenBadNode() bool {
children := s.Children()
for i := range children {
if children[i].IsBadNode() {
return true
}
if children[i].ChildrenBadNode() {
return true
}
}
return false
}
func (s *Struct) SetLocation(loc Location) {
s.Location = loc
}
func (s *Struct) Equals(node Node) bool {
sn, ok := node.(*Struct)
if !ok {
return false
}
if (s == nil && sn != nil) ||
(s != nil && sn == nil) {
return false
} else if s == nil && sn == nil {
return true
}
if s.BadNode != sn.BadNode {
return false
}
if !s.StructKeyword.Equals(sn.StructKeyword) {
return false
}
if !s.LCurKeyword.Equals(sn.LCurKeyword) {
return false
}
if !s.RCurKeyword.Equals(sn.RCurKeyword) {
return false
}
if !s.Identifier.Equals(sn.Identifier) {
return false
}
if len(s.Fields) != len(sn.Fields) {
return false
}
for i := range s.Fields {
if !s.Fields[i].Equals(sn.Fields[i]) {
return false
}
}
if len(s.Comments) != len(sn.Comments) {
return false
}
for i := range s.Comments {
if !s.Comments[i].Equals(sn.Comments[i]) {
return false
}
}
if len(s.EndLineComments) != len(sn.EndLineComments) {
return false
}
for i := range s.EndLineComments {
if !s.EndLineComments[i].Equals(sn.EndLineComments[i]) {
return false
}
}
if !s.Annotations.Equals(sn.Annotations) {
return false
}
return true
}
type ConstKeyword struct {
Keyword
}
func (c *ConstKeyword) Type() string {
return "ConstKeyword"
}
func (c *ConstKeyword) Equals(node Node) bool {
cn, ok := node.(*ConstKeyword)
if !ok {
return false
}
if (c == nil && cn != nil) ||
(c != nil && cn == nil) {
return false
} else if c == nil && cn == nil {
return true
}
return c.Keyword.Equals(&cn.Keyword)
}
type EqualKeyword struct {
Keyword
}
func NewBadEqualKeyword() *EqualKeyword {
return &EqualKeyword{
Keyword: Keyword{
BadNode: true,
},
}
}
func (e *EqualKeyword) Type() string {
return "EqualKeyword"
}
func (e *EqualKeyword) Equals(node Node) bool {
en, ok := node.(*EqualKeyword)
if !ok {
return false
}
if (e == nil && en != nil) ||
(e != nil && en == nil) {
return false
} else if e == nil && en == nil {
return true
}
return e.Keyword.Equals(&en.Keyword)
}
type ListSeparatorKeyword struct {
Keyword
Text string // , or ;
}
func (e *ListSeparatorKeyword) Type() string {
return "ListSeparator"
}
func (e *ListSeparatorKeyword) Equals(node Node) bool {
en, ok := node.(*ListSeparatorKeyword)
if !ok {
return false
}
if (e == nil && en != nil) ||
(e != nil && en == nil) {
return false
} else if e == nil && en == nil {
return true
}
return e.Keyword.Equals(&en.Keyword)
}
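// Const represents a thrift constant definition, for example: const i32 MAX_RETRY = 3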
type Const struct {
ConstKeyword *ConstKeyword
EqualKeyword *EqualKeyword
ListSeparatorKeyword *ListSeparatorKeyword // can be nil
Name *Identifier
ConstType *FieldType
Value *ConstValue
Comments []*Comment
EndLineComments []*Comment
Annotations *Annotations
BadNode bool
Location
}
func NewConst(constKeyword *ConstKeyword, equalKeyword *EqualKeyword, listSeparatorKeyword *ListSeparatorKeyword, name *Identifier, t *FieldType, v *ConstValue, loc Location) *Const {
return &Const{
ConstKeyword: constKeyword,
EqualKeyword: equalKeyword,
ListSeparatorKeyword: listSeparatorKeyword,
Name: name,
ConstType: t,
Value: v,
Location: loc,
}
}
func NewBadConst(loc Location) *Const {
return &Const{
BadNode: true,
Location: loc,
}
}
func (c *Const) Type() string {
return "Const"
}
func (c *Const) SetComments(comments []*Comment, endLineComments []*Comment) {
c.Comments = comments
c.EndLineComments = endLineComments
}
func (c *Const) SetAnnotations(annos *Annotations) {
c.Annotations = annos
}
func (c *Const) Children() []Node {
res := []Node{c.ConstKeyword, c.EqualKeyword, c.Name, c.ConstType, c.Value}
if c.ListSeparatorKeyword != nil {
res = append(res, c.ListSeparatorKeyword)
}
for i := range c.Comments {
res = append(res, c.Comments[i])
}
for i := range c.EndLineComments {
res = append(res, c.EndLineComments[i])
}
if c.Annotations != nil {
res = append(res, c.Annotations)
}
return res
}
func (c *Const) IsBadNode() bool {
return c.BadNode
}
func (c *Const) ChildrenBadNode() bool {
children := c.Children()
for i := range children {
if children[i].IsBadNode() {
return true
}
if children[i].ChildrenBadNode() {
return true
}
}
return false
}
func (c *Const) SetLocation(loc Location) {
c.Location = loc
}
func (c *Const) Equals(node Node) bool {
cn, ok := node.(*Const)
if !ok {
return false
}
if (c == nil && cn != nil) ||
(c != nil && cn == nil) {
return false
} else if c == nil && cn == nil {
return true
}
if c.BadNode != cn.BadNode {
return false
}
if !c.ConstKeyword.Equals(cn.ConstKeyword) {
return false
}
if !c.EqualKeyword.Equals(cn.EqualKeyword) {
return false
}
if !c.ListSeparatorKeyword.Equals(cn.ListSeparatorKeyword) {
return false
}
if !c.Name.Equals(cn.Name) {
return false
}
if !c.ConstType.Equals(cn.ConstType) {
return false
}
if !c.Value.Equals(cn.Value) {
return false
}
if len(c.Comments) != len(cn.Comments) {
return false
}
for i := range c.Comments {
if !c.Comments[i].Equals(cn.Comments[i]) {
return false
}
}
if len(c.EndLineComments) != len(cn.EndLineComments) {
return false
}
for i := range c.EndLineComments {
if !c.EndLineComments[i].Equals(cn.EndLineComments[i]) {
return false
}
}
if !c.Annotations.Equals(cn.Annotations) {
return false
}
return true
}
type TypedefKeyword struct {
Keyword
}
func (t *TypedefKeyword) Type() string {
return "TypedefKeyword"
}
func (t *TypedefKeyword) Equals(node Node) bool {
tn, ok := node.(*TypedefKeyword)
if !ok {
return false
}
if (t == nil && tn != nil) ||
(t != nil && tn == nil) {
return false
} else if t == nil && tn == nil {
return true
}
return t.Keyword.Equals(&tn.Keyword)
}
type Typedef struct {
TypedefKeyword *TypedefKeyword
T *FieldType
Alias *Identifier
Comments []*Comment
EndLineComments []*Comment
Annotations *Annotations
BadNode bool
Location
}
func NewTypedef(keyword *TypedefKeyword, t *FieldType, alias *Identifier, loc Location) *Typedef {
return &Typedef{
TypedefKeyword: keyword,
T: t,
Alias: alias,
Location: loc,
}
}
func NewBadTypedef(loc Location) *Typedef {
return &Typedef{
BadNode: true,
Location: loc,
}
}
func (t *Typedef) Type() string {
return "Typedef"
}
func (t *Typedef) SetComments(comments []*Comment, endLineComments []*Comment) {
t.Comments = comments
t.EndLineComments = endLineComments
}
func (t *Typedef) SetAnnotations(annos *Annotations) {
t.Annotations = annos
}
func (t *Typedef) Children() []Node {
nodes := []Node{t.TypedefKeyword, t.T, t.Alias}
for i := range t.Comments {
nodes = append(nodes, t.Comments[i])
}
for i := range t.EndLineComments {
nodes = append(nodes, t.EndLineComments[i])
}
if t.Annotations != nil {
nodes = append(nodes, t.Annotations)
}
return nodes
}
func (t *Typedef) IsBadNode() bool {
return t.BadNode
}
func (t *Typedef) ChildrenBadNode() bool {
children := t.Children()
for i := range children {
if children[i].IsBadNode() {
return true
}
if children[i].ChildrenBadNode() {
return true
}
}
return false
}
func (t *Typedef) SetLocation(loc Location) {
t.Location = loc
}
func (t *Typedef) Equals(node Node) bool {
tn, ok := node.(*Typedef)
if !ok {
return false
}
if (t == nil && tn != nil) ||
(t != nil && tn == nil) {
return false
} else if t == nil && tn == nil {
return true
}
if t.BadNode != tn.BadNode {
return false
}
if !t.TypedefKeyword.Equals(tn.TypedefKeyword) {
return false
}
if !t.T.Equals(tn.T) {
return false
}
if !t.Alias.Equals(tn.Alias) {
return false
}
if len(t.Comments) != len(tn.Comments) {
return false
}
for i := range t.Comments {
if !t.Comments[i].Equals(tn.Comments[i]) {
return false
}
}
if len(t.EndLineComments) != len(tn.EndLineComments) {
return false
}
for i := range t.EndLineComments {
if !t.EndLineComments[i].Equals(tn.EndLineComments[i]) {
return false
}
}
if !t.Annotations.Equals(tn.Annotations) {
return false
}
return true
}
type EnumKeyword struct {
Keyword
}
func (e *EnumKeyword) Type() string {
return "EnumKeyword"
}
func (e *EnumKeyword) Equals(node Node) bool {
en, ok := node.(*EnumKeyword)
if !ok {
return false
}
if (e == nil && en != nil) ||
(e != nil && en == nil) {
return false
} else if e == nil && en == nil {
return true
}
return e.Keyword.Equals(&en.Keyword)
}
type Enum struct {
EnumKeyword *EnumKeyword
LCurKeyword *LCurKeyword
RCurKeyword *RCurKeyword
Name *Identifier
Values []*EnumValue
Comments []*Comment
EndLineComments []*Comment
Annotations *Annotations
BadNode bool
Location
}
func NewEnum(enumKeyword *EnumKeyword, lCurKeyword *LCurKeyword, rCurKeyword *RCurKeyword, name *Identifier, values []*EnumValue, loc Location) *Enum {
return &Enum{
EnumKeyword: enumKeyword,
LCurKeyword: lCurKeyword,
RCurKeyword: rCurKeyword,
Name: name,
Values: values,
Location: loc,
}
}
func NewBadEnum(loc Location) *Enum {
return &Enum{
BadNode: true,
Location: loc,
}
}
func (e *Enum) Type() string {
return "Enum"
}
func (e *Enum) SetComments(comments []*Comment, endlineComments []*Comment) {
e.Comments = comments
e.EndLineComments = endlineComments
}
func (e *Enum) SetAnnotations(annos *Annotations) {
e.Annotations = annos
}
func (e *Enum) Children() []Node {
nodes := []Node{e.EnumKeyword, e.LCurKeyword, e.RCurKeyword, e.Name}
for i := range e.Values {
nodes = append(nodes, e.Values[i])
}
for i := range e.Comments {
nodes = append(nodes, e.Comments[i])
}
for i := range e.EndLineComments {
nodes = append(nodes, e.EndLineComments[i])
}
if e.Annotations != nil {
nodes = append(nodes, e.Annotations)
}
return nodes
}
func (e *Enum) IsBadNode() bool {
return e.BadNode
}
func (e *Enum) ChildrenBadNode() bool {
children := e.Children()
for i := range children {
if children[i].IsBadNode() {
return true
}
if children[i].ChildrenBadNode() {
return true
}
}
return false
}
func (e *Enum) SetLocation(loc Location) {
e.Location = loc
}
func (e *Enum) Equals(node Node) bool {
en, ok := node.(*Enum)
if !ok {
return false
}
if (e == nil && en != nil) ||
(e != nil && en == nil) {
return false
} else if e == nil && en == nil {
return true
}
if e.BadNode != en.BadNode {
return false
}
if !e.EnumKeyword.Equals(en.EnumKeyword) {
return false
}
if !e.LCurKeyword.Equals(en.LCurKeyword) {
return false
}
if !e.RCurKeyword.Equals(en.RCurKeyword) {
return false
}
if !e.Name.Equals(en.Name) {
return false
}
if len(e.Values) != len(en.Values) {
return false
}
for i := range e.Values {
if !e.Values[i].Equals(en.Values[i]) {
return false
}
}
if len(e.Comments) != len(en.Comments) {
return false
}
for i := range e.Comments {
if !e.Comments[i].Equals(en.Comments[i]) {
return false
}
}
if len(e.EndLineComments) != len(en.EndLineComments) {
return false
}
for i := range e.EndLineComments {
if !e.EndLineComments[i].Equals(en.EndLineComments[i]) {
return false
}
}
if !e.Annotations.Equals(en.Annotations) {
return false
}
return true
}
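// EnumValue represents a single value inside an enum definition. Value holds
// the resolved numeric value, while ValueNode is the literal from the source
// and may be nil when no explicit value is written.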
type EnumValue struct {
ListSeparatorKeyword *ListSeparatorKeyword // can be nil
EqualKeyword *EqualKeyword // can be nil
Name *Identifier
ValueNode *ConstValue
Value int64 // Value records the resolved enum value; it is not an AST node
Annotations *Annotations
Comments []*Comment
EndLineComments []*Comment
BadNode bool
Location
}
func NewBadEnumValue(loc Location) *EnumValue {
return &EnumValue{
BadNode: true,
Location: loc,
}
}
func NewEnumValue(listSeparatorKeyword *ListSeparatorKeyword, equalKeyword *EqualKeyword, name *Identifier, valueNode *ConstValue, value int64, annotations *Annotations, loc Location) *EnumValue {
return &EnumValue{
ListSeparatorKeyword: listSeparatorKeyword,
EqualKeyword: equalKeyword,
Name: name,
ValueNode: valueNode,
Value: value,
Annotations: annotations,
Location: loc,
}
}
func (e *EnumValue) Children() []Node {
nodes := []Node{e.Name}
if e.ValueNode != nil {
nodes = append(nodes, e.ValueNode)
}
if e.ListSeparatorKeyword != nil {
nodes = append(nodes, e.ListSeparatorKeyword)
}
if e.EqualKeyword != nil {
nodes = append(nodes, e.EqualKeyword)
}
for i := range e.Comments {
nodes = append(nodes, e.Comments[i])
}
for i := range e.EndLineComments {
nodes = append(nodes, e.EndLineComments[i])
}
if e.Annotations != nil {
nodes = append(nodes, e.Annotations)
}
return nodes
}
func (e *EnumValue) Type() string {
return "EnumValue"
}
func (e *EnumValue) SetComments(comments []*Comment, endLineComments []*Comment) {
e.Comments = comments
e.EndLineComments = endLineComments
}
func (e *EnumValue) IsBadNode() bool {
return e.BadNode
}
func (e *EnumValue) ChildrenBadNode() bool {
children := e.Children()
for i := range children {
if children[i].IsBadNode() {
return true
}
if children[i].ChildrenBadNode() {
return true
}
}
return false
}
func (e *EnumValue) Equals(node Node) bool {
en, ok := node.(*EnumValue)
if !ok {
return false
}
if (e == nil && en != nil) ||
(e != nil && en == nil) {
return false
} else if e == nil && en == nil {
return true
}
if e.BadNode != en.BadNode {
return false
}
// the list separator (comma) does not affect semantics, so it is not compared
// if !e.ListSeparatorKeyword.Equals(en.ListSeparatorKeyword) {
// return false
// }
if !e.EqualKeyword.Equals(en.EqualKeyword) {
return false
}
if !e.Name.Equals(en.Name) {
return false
}
if !e.ValueNode.Equals(en.ValueNode) {
return false
}
if e.Value != en.Value {
return false
}
if len(e.Comments) != len(en.Comments) {
return false
}
for i := range e.Comments {
if !e.Comments[i].Equals(en.Comments[i]) {
return false
}
}
if len(e.EndLineComments) != len(en.EndLineComments) {
return false
}
for i := range e.EndLineComments {
if !e.EndLineComments[i].Equals(en.EndLineComments[i]) {
return false
}
}
if !e.Annotations.Equals(en.Annotations) {
return false
}
return true
}
type ServiceKeyword struct {
Keyword
}
func (s *ServiceKeyword) Type() string {
return "ServiceKeyword"
}
func (s *ServiceKeyword) Equals(node Node) bool {
sn, ok := node.(*ServiceKeyword)
if !ok {
return false
}
if (s == nil && sn != nil) ||
(s != nil && sn == nil) {
return false
} else if s == nil && sn == nil {
return true
}
return s.Keyword.Equals(&sn.Keyword)
}
type ExtendsKeyword struct {
Keyword
}
func (s *ExtendsKeyword) Type() string {
return "ExtendsKeyword"
}
func (s *ExtendsKeyword) Equals(node Node) bool {
sn, ok := node.(*ExtendsKeyword)
if !ok {
return false
}
if (s == nil && sn != nil) ||
(s != nil && sn == nil) {
return false
} else if s == nil && sn == nil {
return true
}
return s.Keyword.Equals(&sn.Keyword)
}
type Service struct {
ServiceKeyword *ServiceKeyword
ExtendsKeyword *ExtendsKeyword // can be nil
LCurKeyword *LCurKeyword
RCurKeyword *RCurKeyword
Name *Identifier
Extends *Identifier
Functions []*Function
Comments []*Comment
EndLineComments []*Comment
Annotations *Annotations
BadNode bool
Location
}
func NewService(serviceKeyword *ServiceKeyword, extendsKeyword *ExtendsKeyword, lCurKeyword *LCurKeyword, rCurKeyword *RCurKeyword, name *Identifier, extends *Identifier, fns []*Function, loc Location) *Service {
return &Service{
ServiceKeyword: serviceKeyword,
ExtendsKeyword: extendsKeyword,
LCurKeyword: lCurKeyword,
RCurKeyword: rCurKeyword,
Name: name,
Extends: extends,
Functions: fns,
Location: loc,
}
}
func NewBadService(loc Location) *Service {
return &Service{
BadNode: true,
Location: loc,
}
}
func (s *Service) Type() string {
return "Service"
}
func (s *Service) SetComments(comments []*Comment, endLineComments []*Comment) {
s.Comments = comments
s.EndLineComments = endLineComments
}
func (s *Service) SetAnnotations(annos *Annotations) {
s.Annotations = annos
}
func (s *Service) Children() []Node {
nodes := []Node{s.ServiceKeyword, s.LCurKeyword, s.RCurKeyword}
if s.ExtendsKeyword != nil {
nodes = append(nodes, s.ExtendsKeyword)
}
if s.Name != nil {
nodes = append(nodes, s.Name)
}
if s.Extends != nil {
nodes = append(nodes, s.Extends)
}
for i := range s.Functions {
nodes = append(nodes, s.Functions[i])
}
for i := range s.Comments {
nodes = append(nodes, s.Comments[i])
}
for i := range s.EndLineComments {
nodes = append(nodes, s.EndLineComments[i])
}
if s.Annotations != nil {
nodes = append(nodes, s.Annotations)
}
return nodes
}
func (s *Service) IsBadNode() bool {
return s.BadNode
}
func (s *Service) ChildrenBadNode() bool {
children := s.Children()
for i := range children {
if children[i].IsBadNode() {
return true
}
if children[i].ChildrenBadNode() {
return true
}
}
return false
}
func (s *Service) SetLocation(loc Location) {
s.Location = loc
}
func (s *Service) Equals(node Node) bool {
sn, ok := node.(*Service)
if !ok {
return false
}
if (s == nil && sn != nil) ||
(s != nil && sn == nil) {
return false
} else if s == nil && sn == nil {
return true
}
if s.BadNode != sn.BadNode {
return false
}
if !s.ServiceKeyword.Equals(sn.ServiceKeyword) {
return false
}
if !s.ExtendsKeyword.Equals(sn.ExtendsKeyword) {
return false
}
if !s.LCurKeyword.Equals(sn.LCurKeyword) {
return false
}
if !s.RCurKeyword.Equals(sn.RCurKeyword) {
return false
}
if !s.Name.Equals(sn.Name) {
return false
}
if !s.Extends.Equals(sn.Extends) {
return false
}
if len(s.Functions) != len(sn.Functions) {
return false
}
for i := range s.Functions {
if !s.Functions[i].Equals(sn.Functions[i]) {
return false
}
}
if len(s.Comments) != len(sn.Comments) {
return false
}
for i := range s.Comments {
if !s.Comments[i].Equals(sn.Comments[i]) {
return false
}
}
if len(s.EndLineComments) != len(sn.EndLineComments) {
return false
}
for i := range s.EndLineComments {
if !s.EndLineComments[i].Equals(sn.EndLineComments[i]) {
return false
}
}
if !s.Annotations.Equals(sn.Annotations) {
return false
}
return true
}
type OnewayKeyword struct {
Keyword
}
func (o *OnewayKeyword) Type() string {
return "OnewayKeyword"
}
func (o *OnewayKeyword) Equals(node Node) bool {
on, ok := node.(*OnewayKeyword)
if !ok {
return false
}
if (o == nil && on != nil) ||
(o != nil && on == nil) {
return false
} else if o == nil && on == nil {
return true
}
return o.Keyword.Equals(&on.Keyword)
}
type LParKeyword struct {
Keyword
}
func (l *LParKeyword) Type() string {
return "LParKeyword"
}
func (l *LParKeyword) Equals(node Node) bool {
ln, ok := node.(*LParKeyword)
if !ok {
return false
}
if (l == nil && ln != nil) ||
(l != nil && ln == nil) {
return false
} else if l == nil && ln == nil {
return true
}
return l.Keyword.Equals(&ln.Keyword)
}
type RParKeyword struct {
Keyword
}
func (r *RParKeyword) Type() string {
return "RParKeyword"
}
func (r *RParKeyword) Equals(node Node) bool {
rn, ok := node.(*RParKeyword)
if !ok {
return false
}
if (r == nil && rn != nil) ||
(r != nil && rn == nil) {
return false
} else if r == nil && rn == nil {
return true
}
return r.Keyword.Equals(&rn.Keyword)
}
type VoidKeyword struct {
Keyword
}
func (v *VoidKeyword) Type() string {
return "VoidKeyword"
}
func (v *VoidKeyword) Equals(node Node) bool {
vn, ok := node.(*VoidKeyword)
if !ok {
return false
}
if (v == nil && vn != nil) ||
(v != nil && vn == nil) {
return false
} else if v == nil && vn == nil {
return true
}
return v.Keyword.Equals(&vn.Keyword)
}
type ThrowsKeyword struct {
Keyword
}
func (t *ThrowsKeyword) Type() string {
return "ThrowsKeyword"
}
func (t *ThrowsKeyword) Equals(node Node) bool {
tn, ok := node.(*ThrowsKeyword)
if !ok {
return false
}
if (t == nil && tn != nil) ||
(t != nil && tn == nil) {
return false
} else if t == nil && tn == nil {
return true
}
return t.Keyword.Equals(&tn.Keyword)
}
type Throws struct {
ThrowsKeyword *ThrowsKeyword
LParKeyword *LParKeyword
RParKeyword *RParKeyword
Fields []*Field
BadNode bool
Location
}
func NewThrows(throwsKeyword *ThrowsKeyword, lparKeyword *LParKeyword, rparKeyword *RParKeyword, fields []*Field, loc Location) *Throws {
return &Throws{
ThrowsKeyword: throwsKeyword,
LParKeyword: lparKeyword,
RParKeyword: rparKeyword,
Fields: fields,
Location: loc,
}
}
func (t *Throws) Type() string {
return "Throws"
}
func (t *Throws) IsBadNode() bool {
return t.BadNode
}
func (t *Throws) ChildrenBadNode() bool {
children := t.Children()
for i := range children {
if children[i].IsBadNode() {
return true
}
if children[i].ChildrenBadNode() {
return true
}
}
return false
}
func (t *Throws) Children() []Node {
nodes := []Node{t.ThrowsKeyword, t.LParKeyword, t.RParKeyword}
for i := range t.Fields {
nodes = append(nodes, t.Fields[i])
}
return nodes
}
func (t *Throws) Equals(node Node) bool {
tn, ok := node.(*Throws)
if !ok {
return false
}
if (t == nil && tn != nil) ||
(t != nil && tn == nil) {
return false
} else if t == nil && tn == nil {
return true
}
if t.BadNode != tn.BadNode {
return false
}
if !t.ThrowsKeyword.Equals(tn.ThrowsKeyword) {
return false
}
if !t.LParKeyword.Equals(tn.LParKeyword) {
return false
}
if !t.RParKeyword.Equals(tn.RParKeyword) {
return false
}
if len(t.Fields) != len(tn.Fields) {
return false
}
for i := range t.Fields {
if !t.Fields[i].Equals(tn.Fields[i]) {
return false
}
}
return true
}
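// Function represents a function declared inside a service, including its
// return type (or void/oneway markers), arguments and optional throws clause.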
type Function struct {
LParKeyword *LParKeyword
RParKeyword *RParKeyword
ListSeparatorKeyword *ListSeparatorKeyword // can be nil
Name *Identifier
Oneway *OnewayKeyword // can be nil
Void *VoidKeyword // can be nil
FunctionType *FieldType
Arguments []*Field
Throws *Throws
Comments []*Comment
EndLineComments []*Comment
Annotations *Annotations
BadNode bool
Location
}
func NewFunction(lParKeyword *LParKeyword, rParKeyword *RParKeyword, listSeparatorKeyword *ListSeparatorKeyword, name *Identifier, oneway *OnewayKeyword, void *VoidKeyword, ft *FieldType, args []*Field, throws *Throws, comments []*Comment, endlineComments []*Comment, annotations *Annotations, loc Location) *Function {
return &Function{
LParKeyword: lParKeyword,
RParKeyword: rParKeyword,
ListSeparatorKeyword: listSeparatorKeyword,
Name: name,
Oneway: oneway,
Void: void,
FunctionType: ft,
Arguments: args,
Throws: throws,
Comments: comments,
EndLineComments: endlineComments,
Annotations: annotations,
Location: loc,
}
}
func NewBadFunction(loc Location) *Function {
return &Function{
BadNode: true,
Location: loc,
}
}
func (f *Function) Children() []Node {
nodes := []Node{f.LParKeyword, f.RParKeyword}
if f.Oneway != nil {
nodes = append(nodes, f.Oneway)
}
if f.Void != nil {
nodes = append(nodes, f.Void)
}
if f.ListSeparatorKeyword != nil {
nodes = append(nodes, f.ListSeparatorKeyword)
}
if f.Name != nil {
nodes = append(nodes, f.Name)
}
if f.FunctionType != nil {
nodes = append(nodes, f.FunctionType)
}
for i := range f.Arguments {
nodes = append(nodes, f.Arguments[i])
}
if f.Throws != nil {
nodes = append(nodes, f.Throws)
}
for i := range f.Comments {
nodes = append(nodes, f.Comments[i])
}
for i := range f.EndLineComments {
nodes = append(nodes, f.EndLineComments[i])
}
if f.Annotations != nil {
nodes = append(nodes, f.Annotations)
}
return nodes
}
func (f *Function) Type() string {
return "Function"
}
func (f *Function) IsBadNode() bool {
return f.BadNode
}
func (f *Function) ChildrenBadNode() bool {
children := f.Children()
for i := range children {
if children[i].IsBadNode() {
return true
}
if children[i].ChildrenBadNode() {
return true
}
}
return false
}
func (f *Function) Equals(node Node) bool {
fn, ok := node.(*Function)
if !ok {
return false
}
if (f == nil && fn != nil) ||
(f != nil && fn == nil) {
return false
} else if f == nil && fn == nil {
return true
}
if f.BadNode != fn.BadNode {
return false
}
if !f.LParKeyword.Equals(fn.LParKeyword) {
return false
}
if !f.RParKeyword.Equals(fn.RParKeyword) {
return false
}
// formatting may change the default list separator, so it is not compared in Equals
// if !f.ListSeparatorKeyword.Equals(fn.ListSeparatorKeyword) {
// return false
// }
if !f.Name.Equals(fn.Name) {
return false
}
if !f.Oneway.Equals(fn.Oneway) {
return false
}
if !f.Void.Equals(fn.Void) {
return false
}
if !f.FunctionType.Equals(fn.FunctionType) {
return false
}
if len(f.Arguments) != len(fn.Arguments) {
return false
}
for i := range f.Arguments {
if !f.Arguments[i].Equals(fn.Arguments[i]) {
return false
}
}
if !f.Throws.Equals(fn.Throws) {
return false
}
if len(f.Comments) != len(fn.Comments) {
return false
}
for i := range f.Comments {
if !f.Comments[i].Equals(fn.Comments[i]) {
return false
}
}
if len(f.EndLineComments) != len(fn.EndLineComments) {
return false
}
for i := range f.EndLineComments {
if !f.EndLineComments[i].Equals(fn.EndLineComments[i]) {
return false
}
}
if !f.Annotations.Equals(fn.Annotations) {
return false
}
return true
}
type UnionKeyword struct {
Keyword
}
func (u *UnionKeyword) Type() string {
return "UnionKeyword"
}
func (u *UnionKeyword) Equals(node Node) bool {
un, ok := node.(*UnionKeyword)
if !ok {
return false
}
if (u == nil && un != nil) ||
(u != nil && un == nil) {
return false
} else if u == nil && un == nil {
return true
}
return u.Keyword.Equals(&un.Keyword)
}
type Union struct {
UnionKeyword *UnionKeyword
LCurKeyword *LCurKeyword
RCurKeyword *RCurKeyword
Name *Identifier
Fields []*Field
Comments []*Comment
EndLineComments []*Comment
Annotations *Annotations
BadNode bool
Location
}
func NewUnion(unionKeyword *UnionKeyword, lCurKeyword *LCurKeyword, rCurKeyword *RCurKeyword, name *Identifier, fields []*Field, loc Location) *Union {
return &Union{
UnionKeyword: unionKeyword,
LCurKeyword: lCurKeyword,
RCurKeyword: rCurKeyword,
Name: name,
Fields: fields,
Location: loc,
}
}
func NewBadUnion(loc Location) *Union {
return &Union{
BadNode: true,
Location: loc,
}
}
func (u *Union) Type() string {
return "Union"
}
func (u *Union) SetComments(comments []*Comment, endLineComments []*Comment) {
u.Comments = comments
u.EndLineComments = endLineComments
}
func (u *Union) SetAnnotations(annos *Annotations) {
u.Annotations = annos
}
func (u *Union) Children() []Node {
nodes := []Node{u.Name, u.UnionKeyword, u.LCurKeyword, u.RCurKeyword}
for i := range u.Fields {
nodes = append(nodes, u.Fields[i])
}
for i := range u.Comments {
nodes = append(nodes, u.Comments[i])
}
for i := range u.EndLineComments {
nodes = append(nodes, u.EndLineComments[i])
}
if u.Annotations != nil {
nodes = append(nodes, u.Annotations)
}
return nodes
}
func (u *Union) IsBadNode() bool {
return u.BadNode
}
func (u *Union) ChildrenBadNode() bool {
children := u.Children()
for i := range children {
if children[i].IsBadNode() {
return true
}
if children[i].ChildrenBadNode() {
return true
}
}
return false
}
func (u *Union) SetLocation(loc Location) {
u.Location = loc
}
func (u *Union) Equals(node Node) bool {
un, ok := node.(*Union)
if !ok {
return false
}
if (u == nil && un != nil) ||
(u != nil && un == nil) {
return false
} else if u == nil && un == nil {
return true
}
if u.BadNode != un.BadNode {
return false
}
if !u.UnionKeyword.Equals(un.UnionKeyword) {
return false
}
if !u.LCurKeyword.Equals(un.LCurKeyword) {
return false
}
if !u.RCurKeyword.Equals(un.RCurKeyword) {
return false
}
if !u.Name.Equals(un.Name) {
return false
}
if len(u.Fields) != len(un.Fields) {
return false
}
for i := range u.Fields {
if !u.Fields[i].Equals(un.Fields[i]) {
return false
}
}
if len(u.Comments) != len(un.Comments) {
return false
}
for i := range u.Comments {
if !u.Comments[i].Equals(un.Comments[i]) {
return false
}
}
if len(u.EndLineComments) != len(un.EndLineComments) {
return false
}
for i := range u.EndLineComments {
if !u.EndLineComments[i].Equals(un.EndLineComments[i]) {
return false
}
}
if !u.Annotations.Equals(un.Annotations) {
return false
}
return true
}
type ExceptionKeyword struct {
Keyword
}
func (e *ExceptionKeyword) Type() string {
return "ExceptionKeyword"
}
func (e *ExceptionKeyword) Equals(node Node) bool {
en, ok := node.(*ExceptionKeyword)
if !ok {
return false
}
if (e == nil && en != nil) ||
(e != nil && en == nil) {
return false
} else if e == nil && en == nil {
return true
}
return e.Keyword.Equals(&en.Keyword)
}
type Exception struct {
ExceptionKeyword *ExceptionKeyword
LCurKeyword *LCurKeyword
RCurKeyword *RCurKeyword
Name *Identifier
Fields []*Field
Comments []*Comment
EndLineComments []*Comment
Annotations *Annotations
BadNode bool
Location
}
func NewException(exceptionKeyword *ExceptionKeyword, lCurKeyword *LCurKeyword, rCurKeyword *RCurKeyword, name *Identifier, fields []*Field, loc Location) *Exception {
return &Exception{
ExceptionKeyword: exceptionKeyword,
LCurKeyword: lCurKeyword,
RCurKeyword: rCurKeyword,
Name: name,
Fields: fields,
Location: loc,
}
}
func NewBadException(loc Location) *Exception {
return &Exception{
BadNode: true,
Location: loc,
}
}
func (e *Exception) Type() string {
return "Exception"
}
func (e *Exception) SetComments(comments []*Comment, endLineComments []*Comment) {
e.Comments = comments
e.EndLineComments = endLineComments
}
func (e *Exception) SetAnnotations(annos *Annotations) {
e.Annotations = annos
}
func (e *Exception) Children() []Node {
nodes := []Node{e.Name, e.ExceptionKeyword, e.LCurKeyword, e.RCurKeyword}
for i := range e.Fields {
nodes = append(nodes, e.Fields[i])
}
for i := range e.Comments {
nodes = append(nodes, e.Comments[i])
}
for i := range e.EndLineComments {
nodes = append(nodes, e.EndLineComments[i])
}
if e.Annotations != nil {
nodes = append(nodes, e.Annotations)
}
return nodes
}
func (e *Exception) IsBadNode() bool {
return e.BadNode
}
func (e *Exception) ChildrenBadNode() bool {
children := e.Children()
for i := range children {
if children[i].IsBadNode() {
return true
}
if children[i].ChildrenBadNode() {
return true
}
}
return false
}
func (e *Exception) SetLocation(loc Location) {
e.Location = loc
}
func (e *Exception) Equals(node Node) bool {
en, ok := node.(*Exception)
if !ok {
return false
}
if (e == nil && en != nil) ||
(e != nil && en == nil) {
return false
} else if e == nil && en == nil {
return true
}
if e.BadNode != en.BadNode {
return false
}
if !e.ExceptionKeyword.Equals(en.ExceptionKeyword) {
return false
}
if !e.LCurKeyword.Equals(en.LCurKeyword) {
return false
}
if !e.RCurKeyword.Equals(en.RCurKeyword) {
return false
}
if !e.Name.Equals(en.Name) {
return false
}
if len(e.Fields) != len(en.Fields) {
return false
}
for i := range e.Fields {
if !e.Fields[i].Equals(en.Fields[i]) {
return false
}
}
if len(e.Comments) != len(en.Comments) {
return false
}
for i := range e.Comments {
if !e.Comments[i].Equals(en.Comments[i]) {
return false
}
}
if len(e.EndLineComments) != len(en.EndLineComments) {
return false
}
for i := range e.EndLineComments {
if !e.EndLineComments[i].Equals(en.EndLineComments[i]) {
return false
}
}
if !e.Annotations.Equals(en.Annotations) {
return false
}
return true
}
type IdentifierName struct {
Text string
BadNode bool
Location
}
func NewIdentifierName(name string, loc Location) *IdentifierName {
return &IdentifierName{
Text: name,
Location: loc,
BadNode: name == "",
}
}
func (i *IdentifierName) Children() []Node {
return nil
}
func (i *IdentifierName) Type() string {
return "IdentifierName"
}
func (i *IdentifierName) IsBadNode() bool {
return i.BadNode
}
func (i *IdentifierName) ChildrenBadNode() bool {
children := i.Children()
for i := range children {
if children[i].IsBadNode() {
return true
}
if children[i].ChildrenBadNode() {
return true
}
}
return false
}
func (i *IdentifierName) Equals(node Node) bool {
in, ok := node.(*IdentifierName)
if !ok {
return false
}
if (i == nil && in != nil) ||
(i != nil && in == nil) {
return false
} else if i == nil && in == nil {
return true
}
if i.BadNode != in.BadNode {
return false
}
if i.Text != in.Text {
return false
}
return true
}
type Identifier struct {
Name *IdentifierName
Comments []*Comment
BadNode bool
Location
}
func NewIdentifier(name *IdentifierName, comments []*Comment, loc Location) *Identifier {
id := &Identifier{
Name: name,
Comments: comments,
Location: loc,
BadNode: name == nil || name.BadNode,
}
return id
}
func NewBadIdentifier(loc Location) *Identifier {
return &Identifier{
BadNode: true,
Location: loc,
}
}
func (i *Identifier) ToFieldType() *FieldType {
t := &FieldType{
TypeName: &TypeName{
Name: i.Name.Text,
Location: i.Name.Location,
},
Location: i.Location,
}
return t
}
func (i *Identifier) Children() []Node {
var nodes []Node
for _, com := range i.Comments {
nodes = append(nodes, com)
}
nodes = append(nodes, i.Name)
return nodes
}
func (i *Identifier) Type() string {
return "Identifier"
}
func (i *Identifier) IsBadNode() bool {
return i.BadNode
}
func (i *Identifier) ChildrenBadNode() bool {
children := i.Children()
for i := range children {
if children[i].IsBadNode() {
return true
}
if children[i].ChildrenBadNode() {
return true
}
}
return false
}
func (i *Identifier) Equals(node Node) bool {
in, ok := node.(*Identifier)
if !ok {
return false
}
if (i == nil && in != nil) ||
(i != nil && in == nil) {
return false
} else if i == nil && in == nil {
return true
}
if i.BadNode != in.BadNode {
return false
}
if !i.Name.Equals(in.Name) {
return false
}
if len(i.Comments) != len(in.Comments) {
return false
}
for n := range i.Comments {
if !i.Comments[n].Equals(in.Comments[n]) {
return false
}
}
return true
}
func ConvertPosition(pos position) Position {
return Position{
Line: pos.line,
Col: pos.col,
Offset: pos.offset,
}
}
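// Field represents a field of a struct, union or exception, or an entry in a
// function's argument or throws list: index, optional requiredness, type,
// name and an optional default value.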
type Field struct {
Index *FieldIndex
RequiredKeyword *RequiredKeyword
FieldType *FieldType
Identifier *Identifier
ConstValue *ConstValue
EqualKeyword *EqualKeyword // can be nil
ListSeparatorKeyword *ListSeparatorKeyword // can be nil
Comments []*Comment
EndLineComments []*Comment
Annotations *Annotations
BadNode bool
Location
}
func NewField(equalKeyword *EqualKeyword, listSeparatorKeyword *ListSeparatorKeyword, comments []*Comment, endLineComments []*Comment, annotations *Annotations, index *FieldIndex, required *RequiredKeyword, fieldType *FieldType, identifier *Identifier, constValue *ConstValue, loc Location) *Field {
field := &Field{
EqualKeyword: equalKeyword,
ListSeparatorKeyword: listSeparatorKeyword,
Comments: comments,
EndLineComments: endLineComments,
Annotations: annotations,
Index: index,
RequiredKeyword: required,
FieldType: fieldType,
Identifier: identifier,
ConstValue: constValue,
Location: loc,
}
return field
}
func NewBadField(loc Location) *Field {
return &Field{
BadNode: true,
Location: loc,
}
}
func (f *Field) Children() []Node {
var res []Node
if f.RequiredKeyword != nil {
res = append(res, f.RequiredKeyword)
}
if f.FieldType != nil {
res = append(res, f.FieldType)
}
if f.Identifier != nil {
res = append(res, f.Identifier)
}
if f.ConstValue != nil {
res = append(res, f.ConstValue)
}
if f.EqualKeyword != nil {
res = append(res, f.EqualKeyword)
}
if f.ListSeparatorKeyword != nil {
res = append(res, f.ListSeparatorKeyword)
}
for i := range f.Comments {
res = append(res, f.Comments[i])
}
for i := range f.EndLineComments {
res = append(res, f.EndLineComments[i])
}
if f.Annotations != nil {
res = append(res, f.Annotations)
}
return res
}
func (f *Field) Type() string {
return "Field"
}
func (f *Field) IsBadNode() bool {
return f.BadNode
}
func (f *Field) ChildrenBadNode() bool {
children := f.Children()
for i := range children {
if children[i].IsBadNode() {
return true
}
if children[i].ChildrenBadNode() {
return true
}
}
return false
}
func (f *Field) Equals(node Node) bool {
fn, ok := node.(*Field)
if !ok {
return false
}
if (f == nil && fn != nil) ||
(f != nil && fn == nil) {
return false
} else if f == nil && fn == nil {
return true
}
if f.BadNode != fn.BadNode {
return false
}
if !f.Index.Equals(fn.Index) {
return false
}
if !f.RequiredKeyword.Equals(fn.RequiredKeyword) {
return false
}
if !f.FieldType.Equals(fn.FieldType) {
return false
}
if !f.Identifier.Equals(fn.Identifier) {
return false
}
if !f.ConstValue.Equals(fn.ConstValue) {
return false
}
if !f.EqualKeyword.Equals(fn.EqualKeyword) {
return false
}
// the trailing ',' does not affect semantics, so its comparison is disabled for now
// if !f.ListSeparatorKeyword.Equals(fn.ListSeparatorKeyword) {
// return false
// }
if len(f.Comments) != len(fn.Comments) {
return false
}
for i := range f.Comments {
if !f.Comments[i].Equals(fn.Comments[i]) {
return false
}
}
if len(f.EndLineComments) != len(fn.EndLineComments) {
return false
}
for i := range f.EndLineComments {
if !f.EndLineComments[i].Equals(fn.EndLineComments[i]) {
return false
}
}
if !f.Annotations.Equals(fn.Annotations) {
return false
}
return true
}
type ColonKeyword struct {
Keyword
}
func (c *ColonKeyword) Type() string {
return "ColonKeyword"
}
func (c *ColonKeyword) Equals(node Node) bool {
cn, ok := node.(*ColonKeyword)
if !ok {
return false
}
if (c == nil && cn != nil) ||
(c != nil && cn == nil) {
return false
} else if c == nil && cn == nil {
return true
}
return c.Keyword.Equals(&cn.Keyword)
}
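// FieldIndex is the numeric field id together with its ':' token,
// e.g. the "1:" part of "1: required string name".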
type FieldIndex struct {
ColonKeyword *ColonKeyword
Value int
Comments []*Comment
BadNode bool
Location
}
func NewFieldIndex(ColonKeyword *ColonKeyword, v int, comments []*Comment, loc Location) *FieldIndex {
return &FieldIndex{
ColonKeyword: ColonKeyword,
Value: v,
Comments: comments,
Location: loc,
}
}
func NewBadFieldIndex(loc Location) *FieldIndex {
return &FieldIndex{
BadNode: true,
Location: loc,
}
}
func (f *FieldIndex) Children() []Node {
return nil
}
func (f *FieldIndex) Type() string {
return "FieldIndex"
}
func (f *FieldIndex) IsBadNode() bool {
return f.BadNode
}
func (f *FieldIndex) ChildrenBadNode() bool {
children := f.Children()
for i := range children {
if children[i].IsBadNode() {
return true
}
if children[i].ChildrenBadNode() {
return true
}
}
return false
}
func (f *FieldIndex) Equals(node Node) bool {
fn, ok := node.(*FieldIndex)
if !ok {
return false
}
if (f == nil && fn != nil) ||
(f != nil && fn == nil) {
return false
} else if f == nil && fn == nil {
return true
}
if f.BadNode != fn.BadNode {
return false
}
if !f.ColonKeyword.Equals(fn.ColonKeyword) {
return false
}
if f.Value != fn.Value {
return false
}
if len(f.Comments) != len(fn.Comments) {
return false
}
for i := range f.Comments {
if !f.Comments[i].Equals(fn.Comments[i]) {
return false
}
}
return true
}
type RequiredKeyword struct {
Keyword
}
func (r *RequiredKeyword) Type() string {
return "RequiredKeyword"
}
func (r *RequiredKeyword) Equals(node Node) bool {
rn, ok := node.(*RequiredKeyword)
if !ok {
return false
}
if (r == nil && rn != nil) ||
(r != nil && rn == nil) {
return false
} else if r == nil && rn == nil {
return true
}
return r.Keyword.Equals(&rn.Keyword)
}
type LPointKeyword struct {
Keyword
}
func (l *LPointKeyword) Type() string {
return "LPointKeyword"
}
func (l *LPointKeyword) Equals(node Node) bool {
ln, ok := node.(*LPointKeyword)
if !ok {
return false
}
if (l == nil && ln != nil) ||
(l != nil && ln == nil) {
return false
} else if l == nil && ln == nil {
return true
}
return l.Keyword.Equals(&ln.Keyword)
}
type RPointKeyword struct {
Keyword
}
func (r *RPointKeyword) Type() string {
return "RPointKeyword"
}
func (r *RPointKeyword) Equals(node Node) bool {
rn, ok := node.(*RPointKeyword)
if !ok {
return false
}
if (r == nil && rn != nil) ||
(r != nil && rn == nil) {
return false
} else if r == nil && rn == nil {
return true
}
return r.Keyword.Equals(&rn.Keyword)
}
type CommaKeyword struct {
Keyword
}
func (r *CommaKeyword) Type() string {
return "CommaKeyword"
}
func (r *CommaKeyword) Equals(node Node) bool {
rn, ok := node.(*CommaKeyword)
if !ok {
return false
}
if (r == nil && rn != nil) ||
(r != nil && rn == nil) {
return false
} else if r == nil && rn == nil {
return true
}
return r.Keyword.Equals(&rn.Keyword)
}
type CppTypeKeyword struct {
Keyword
}
func (r *CppTypeKeyword) Type() string {
return "CppTypeKeyword"
}
func (r *CppTypeKeyword) Equals(node Node) bool {
rn, ok := node.(*CppTypeKeyword)
if !ok {
return false
}
if (r == nil && rn != nil) ||
(r != nil && rn == nil) {
return false
} else if r == nil && rn == nil {
return true
}
return r.Keyword.Equals(&rn.Keyword)
}
type CppType struct {
CppTypeKeyword *CppTypeKeyword
Literal *Literal
BadNode bool
Location
}
func NewCppType(cppTypeKeyword *CppTypeKeyword, literal *Literal, loc Location) *CppType {
return &CppType{
CppTypeKeyword: cppTypeKeyword,
Literal: literal,
Location: loc,
}
}
func (c *CppType) Type() string {
return "CppType"
}
func (c *CppType) Children() []Node {
return []Node{c.CppTypeKeyword, c.Literal}
}
func (c *CppType) IsBadNode() bool {
return c.BadNode
}
func (c *CppType) ChildrenBadNode() bool {
children := c.Children()
for i := range children {
if children[i].IsBadNode() {
return true
}
if children[i].ChildrenBadNode() {
return true
}
}
return false
}
func (c *CppType) Equals(node Node) bool {
cn, ok := node.(*CppType)
if !ok {
return false
}
if (c == nil && cn != nil) ||
(c != nil && cn == nil) {
return false
} else if c == nil && cn == nil {
return true
}
if c.BadNode != cn.BadNode {
return false
}
if !c.CppTypeKeyword.Equals(cn.CppTypeKeyword) {
return false
}
if !c.Literal.Equals(cn.Literal) {
return false
}
return true
}
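// FieldType is a type reference such as string, list<i32> or map<string, Foo>.
// Container types carry their element types in KeyType (and ValueType for map),
// so map<string, list<i32>> parses into nested FieldType values.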
type FieldType struct {
TypeName *TypeName
// KeyType is only set when TypeName is map, set or list
KeyType *FieldType
// ValueType is only set when TypeName is map
ValueType *FieldType
// CppType only appears in map, set and list types; can be nil
CppType *CppType
// LPointKeyword only appears in map, set and list types
LPointKeyword *LPointKeyword
// RPointKeyword only appears in map, set and list types
RPointKeyword *RPointKeyword
// CommaKeyword only appears in map types
CommaKeyword *CommaKeyword
Annotations *Annotations
BadNode bool
Location
}
func NewFieldType(lpointKeyword *LPointKeyword, rpointKeyword *RPointKeyword, commaKeyword *CommaKeyword, cppType *CppType, typeName *TypeName, keyType *FieldType, valueType *FieldType, loc Location) *FieldType {
return &FieldType{
LPointKeyword: lpointKeyword,
RPointKeyword: rpointKeyword,
CommaKeyword: commaKeyword,
CppType: cppType,
TypeName: typeName,
KeyType: keyType,
ValueType: valueType,
Location: loc,
}
}
func (c *FieldType) Children() []Node {
nodes := make([]Node, 0, 1)
nodes = append(nodes, c.TypeName)
if c.KeyType != nil {
nodes = append(nodes, c.KeyType)
}
if c.ValueType != nil {
nodes = append(nodes, c.ValueType)
}
return nodes
}
func (c *FieldType) Type() string {
return "FieldType"
}
func (c *FieldType) IsBadNode() bool {
return c.BadNode
}
func (c *FieldType) ChildrenBadNode() bool {
children := c.Children()
for i := range children {
if children[i].IsBadNode() {
return true
}
if children[i].ChildrenBadNode() {
return true
}
}
return false
}
func (c *FieldType) Equals(node Node) bool {
fn, ok := node.(*FieldType)
if !ok {
return false
}
if (c == nil && fn != nil) ||
(c != nil && fn == nil) {
return false
} else if c == nil && fn == nil {
return true
}
if c.BadNode != fn.BadNode {
return false
}
if !c.TypeName.Equals(fn.TypeName) {
return false
}
if !c.KeyType.Equals(fn.KeyType) {
return false
}
if !c.ValueType.Equals(fn.ValueType) {
return false
}
if !c.CppType.Equals(fn.CppType) {
return false
}
if !c.LPointKeyword.Equals(fn.LPointKeyword) {
return false
}
if !c.RPointKeyword.Equals(fn.RPointKeyword) {
return false
}
if !c.CommaKeyword.Equals(fn.CommaKeyword) {
return false
}
if !c.Annotations.Equals(fn.Annotations) {
return false
}
return true
}
type TypeName struct {
// TypeName can be:
// container type: map, set, list
// base type: bool, byte, i8, i16, i32, i64, double, string, binary, uuid
// struct, enum, union, exception, identifier
Name string
Comments []*Comment
BadNode bool
Location
}
func NewTypeName(name string, pos position) *TypeName {
t := &TypeName{
Name: name,
Location: NewLocation(pos, name),
}
return t
}
func (t *TypeName) Children() []Node {
var nodes []Node
for i := range t.Comments {
nodes = append(nodes, t.Comments[i])
}
return nodes
}
func (t *TypeName) Type() string {
return "TypeName"
}
func (t *TypeName) IsBadNode() bool {
return t.BadNode
}
func (t *TypeName) ChildrenBadNode() bool {
children := t.Children()
for i := range children {
if children[i].IsBadNode() {
return true
}
if children[i].ChildrenBadNode() {
return true
}
}
return false
}
func (t *TypeName) Equals(node Node) bool {
tn, ok := node.(*TypeName)
if !ok {
return false
}
if (t == nil && tn != nil) ||
(t != nil && tn == nil) {
return false
} else if t == nil && tn == nil {
return true
}
if t.BadNode != tn.BadNode {
return false
}
if t.Name != tn.Name {
return false
}
if len(t.Comments) != len(tn.Comments) {
return false
}
for i := range t.Comments {
if !t.Comments[i].Equals(tn.Comments[i]) {
return false
}
}
return true
}
type LBrkKeyword struct {
Keyword
}
func (l *LBrkKeyword) Type() string {
return "LBrkKeyword"
}
func (l *LBrkKeyword) Equals(node Node) bool {
ln, ok := node.(*LBrkKeyword)
if !ok {
return false
}
if (l == nil && ln != nil) ||
(l != nil && ln == nil) {
return false
} else if l == nil && ln == nil {
return true
}
return l.Keyword.Equals(&ln.Keyword)
}
type RBrkKeyword struct {
Keyword
}
func (l *RBrkKeyword) Type() string {
return "RBrkKeyword"
}
func (l *RBrkKeyword) Equals(node Node) bool {
ln, ok := node.(*RBrkKeyword)
if !ok {
return false
}
if (l == nil && ln != nil) ||
(l != nil && ln == nil) {
return false
} else if l == nil && ln == nil {
return true
}
return l.Keyword.Equals(&ln.Keyword)
}
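// ConstValue is a parsed constant. Value holds the parsed value (or identifier name),
// ValueInText preserves the raw input for i64 and double values, and map entries are
// built as "pair" values via NewMapConstValue with both Key and Value set.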
type ConstValue struct {
// TypeName can be: list, map, pair, string, identifier, i64, double
TypeName string
// Value is the actual value or identifier name
Value any
// ValueInText is the raw user input; it is kept for i64 and double values
ValueInText string
// Key is only set for map entries (TypeName "pair")
Key any
// brackets only appear in list values
LBrkKeyword *LBrkKeyword
RBrkKeyword *RBrkKeyword
// braces only appear in map values
LCurKeyword *LCurKeyword
RCurKeyword *RCurKeyword
// present in list items and map items
ListSeparatorKeyword *ListSeparatorKeyword
// present in map items
ColonKeyword *ColonKeyword
Comments []*Comment
BadNode bool
Location
}
func NewConstValue(typeName string, value any, loc Location) *ConstValue {
return &ConstValue{
TypeName: typeName,
Value: value,
Location: loc,
}
}
func NewBadConstValue(loc Location) *ConstValue {
return &ConstValue{
BadNode: true,
Location: loc,
}
}
func NewBadIntConstValue(loc Location) *ConstValue {
return &ConstValue{
TypeName: "i64",
BadNode: true,
Value: int64(0),
Location: loc,
}
}
func NewMapConstValue(key, value *ConstValue, loc Location) *ConstValue {
return &ConstValue{
TypeName: "pair",
Key: key,
Value: value,
Location: loc,
}
}
func (c *ConstValue) SetComments(comments []*Comment) {
c.Comments = comments
}
// TODO(jpf): nodes of key, value
func (c *ConstValue) Children() []Node {
return nil
}
func (c *ConstValue) Type() string {
return "ConstValue"
}
func (c *ConstValue) IsBadNode() bool {
return c.BadNode
}
func (c *ConstValue) ChildrenBadNode() bool {
children := c.Children()
for i := range children {
if children[i].IsBadNode() {
return true
}
if children[i].ChildrenBadNode() {
return true
}
}
return false
}
func (c *ConstValue) Equals(node Node) bool {
cn, ok := node.(*ConstValue)
if !ok {
return false
}
if (c == nil && cn != nil) ||
(c != nil && cn == nil) {
return false
} else if c == nil && cn == nil {
return true
}
if c.BadNode != cn.BadNode {
return false
}
if c.TypeName != cn.TypeName {
return false
}
// TODO(jpf): compare the any-typed values
// if c.Value != cn.Value {
// return false
// }
if c.ValueInText != cn.ValueInText {
return false
}
// TODO(jpf): compare the any-typed values
// if c.Key != cn.Key {
// return false
// }
if !c.LBrkKeyword.Equals(cn.LBrkKeyword) {
return false
}
if !c.RBrkKeyword.Equals(cn.RBrkKeyword) {
return false
}
if !c.LCurKeyword.Equals(cn.LCurKeyword) {
return false
}
if !c.RCurKeyword.Equals(cn.RCurKeyword) {
return false
}
if !c.ListSeparatorKeyword.Equals(cn.ListSeparatorKeyword) {
return false
}
if !c.ColonKeyword.Equals(cn.ColonKeyword) {
return false
}
if len(c.Comments) != len(cn.Comments) {
return false
}
for i := range c.Comments {
if !c.Comments[i].Equals(cn.Comments[i]) {
return false
}
}
return true
}
type LiteralValue struct {
Text string
BadNode bool
Location
}
func NewLiteralValue(text string, loc Location) *LiteralValue {
return &LiteralValue{
Text: text,
Location: loc,
}
}
func NewBadLiteralValue(loc Location) *LiteralValue {
return &LiteralValue{
BadNode: true,
Location: loc,
}
}
func (l *LiteralValue) Children() []Node {
return nil
}
func (l *LiteralValue) Type() string {
return "LiteralValue"
}
func (l *LiteralValue) IsBadNode() bool {
return l.BadNode
}
func (l *LiteralValue) ChildrenBadNode() bool {
children := l.Children()
for i := range children {
if children[i].IsBadNode() {
return true
}
if children[i].ChildrenBadNode() {
return true
}
}
return false
}
func (l *LiteralValue) Equals(node Node) bool {
ln, ok := node.(*LiteralValue)
if !ok {
return false
}
if (l == nil && ln != nil) ||
(l != nil && ln == nil) {
return false
} else if l == nil && ln == nil {
return true
}
if l.BadNode != ln.BadNode {
return false
}
if l.Text != ln.Text {
return false
}
return true
}
type Literal struct {
Value *LiteralValue
Quote string // single for ', double for "
Comments []*Comment
BadNode bool
Location
}
// TODO: distinguish between single and double quotes?
func NewLiteral(comments []*Comment, v *LiteralValue, quote string, loc Location) *Literal {
return &Literal{
Value: v,
Quote: quote,
Comments: comments,
Location: loc,
}
}
func NewBadLiteral(loc Location) *Literal {
return &Literal{
Location: loc,
BadNode: true,
}
}
func (l *Literal) Children() []Node {
var nodes []Node
for i := range l.Comments {
nodes = append(nodes, l.Comments[i])
}
if l.Value != nil {
nodes = append(nodes, l.Value)
}
return nodes
}
func (l *Literal) Type() string {
return "Literal"
}
func (l *Literal) IsBadNode() bool {
return l.BadNode
}
func (l *Literal) ChildrenBadNode() bool {
children := l.Children()
for i := range children {
if children[i].IsBadNode() {
return true
}
if children[i].ChildrenBadNode() {
return true
}
}
return false
}
func (l *Literal) Equals(node Node) bool {
ln, ok := node.(*Literal)
if !ok {
return false
}
if (l == nil && ln != nil) ||
(l != nil && ln == nil) {
return false
} else if l == nil && ln == nil {
return true
}
if l.BadNode != ln.BadNode {
return false
}
if !l.Value.Equals(ln.Value) {
return false
}
if l.Quote != ln.Quote {
return false
}
if len(l.Comments) != len(ln.Comments) {
return false
}
for i := range l.Comments {
if !l.Comments[i].Equals(ln.Comments[i]) {
return false
}
}
return true
}
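// Annotations is the parenthesized annotation list that may follow a definition,
// e.g. (key1 = "v1", key2 = "v2").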
type Annotations struct {
Annotations []*Annotation
LParKeyword *LParKeyword
RParKeyword *RParKeyword
BadNode bool
Location
}
func NewAnnotations(lpar *LParKeyword, rpar *RParKeyword, annos []*Annotation, loc Location) *Annotations {
return &Annotations{
LParKeyword: lpar,
RParKeyword: rpar,
Annotations: annos,
Location: loc,
}
}
func (a *Annotations) Type() string {
return "Annotations"
}
func (a *Annotations) Children() []Node {
nodes := []Node{a.LParKeyword, a.RParKeyword}
for i := range a.Annotations {
nodes = append(nodes, a.Annotations[i])
}
return nodes
}
func (a *Annotations) IsBadNode() bool {
return a.BadNode
}
func (a *Annotations) ChildrenBadNode() bool {
children := a.Children()
for i := range children {
if children[i].IsBadNode() {
return true
}
if children[i].ChildrenBadNode() {
return true
}
}
return false
}
func (a *Annotations) Equals(node Node) bool {
an, ok := node.(*Annotations)
if !ok {
return false
}
if (a == nil && an != nil) ||
(a != nil && an == nil) {
return false
} else if a == nil && an == nil {
return true
}
if a.BadNode != an.BadNode {
return false
}
if len(a.Annotations) != len(an.Annotations) {
return false
}
for i := range a.Annotations {
if !a.Annotations[i].Equals(an.Annotations[i]) {
return false
}
}
if !a.LParKeyword.Equals(an.LParKeyword) {
return false
}
if !a.RParKeyword.Equals(an.RParKeyword) {
return false
}
return true
}
type Annotation struct {
EqualKeyword *EqualKeyword
ListSeparatorKeyword *ListSeparatorKeyword
Identifier *Identifier
Value *Literal
BadNode bool
Location
}
func NewAnnotation(equalKeyword *EqualKeyword, listSeparatorKeyword *ListSeparatorKeyword, id *Identifier, value *Literal, loc Location) *Annotation {
return &Annotation{
EqualKeyword: equalKeyword,
ListSeparatorKeyword: listSeparatorKeyword,
Identifier: id,
Value: value,
Location: loc,
}
}
func NewBadAnnotation(loc Location) *Annotation {
return &Annotation{
BadNode: true,
Location: loc,
}
}
func (a *Annotation) Children() []Node {
nodes := []Node{a.Identifier, a.Value, a.EqualKeyword}
if a.ListSeparatorKeyword != nil {
nodes = append(nodes, a.ListSeparatorKeyword)
}
return nodes
}
func (a *Annotation) Type() string {
return "Annotation"
}
func (a *Annotation) IsBadNode() bool {
return a.BadNode
}
func (a *Annotation) ChildrenBadNode() bool {
children := a.Children()
for i := range children {
if children[i].IsBadNode() {
return true
}
if children[i].ChildrenBadNode() {
return true
}
}
return false
}
func (a *Annotation) Equals(node Node) bool {
an, ok := node.(*Annotation)
if !ok {
return false
}
if (a == nil && an != nil) ||
(a != nil && an == nil) {
return false
} else if a == nil && an == nil {
return true
}
if a.BadNode != an.BadNode {
return false
}
if !a.EqualKeyword.Equals(an.EqualKeyword) {
return false
}
if !a.ListSeparatorKeyword.Equals(an.ListSeparatorKeyword) {
return false
}
if !a.Identifier.Equals(an.Identifier) {
return false
}
if !a.Value.Equals(an.Value) {
return false
}
return true
}
type CommentStyle string
const (
CommentStyleShell CommentStyle = "shell"
CommentStyleMultiLine CommentStyle = "multiline"
CommentStyleSingleLine CommentStyle = "singleline"
)
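// Comment is a single source comment; Style records whether it was written
// shell style (# ...), multi-line (/* ... */) or single-line (// ...).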
type Comment struct {
Text string
Style CommentStyle // shell: #xxx, multiline: /* *** */, singleline: // xxxxx
BadNode bool
Location
}
func NewComment(text string, style CommentStyle, loc Location) *Comment {
return &Comment{
Text: text,
Style: style,
Location: loc,
}
}
func NewBadComment(loc Location) *Comment {
return &Comment{
BadNode: true,
Location: loc,
}
}
func (c *Comment) Children() []Node {
return nil
}
func (c *Comment) Type() string {
return "Comment"
}
func (c *Comment) IsBadNode() bool {
return c.BadNode
}
func (c *Comment) ChildrenBadNode() bool {
return false
}
func (c *Comment) Equals(node Node) bool {
cn, ok := node.(*Comment)
if !ok {
return false
}
if (c == nil && cn != nil) ||
(c != nil && cn == nil) {
return false
} else if c == nil && cn == nil {
return true
}
if c.BadNode != cn.BadNode {
return false
}
aLines := strings.Split(c.Text, "\n")
bLines := strings.Split(cn.Text, "\n")
if len(aLines) != len(bLines) {
return false
}
for i := range aLines {
if strings.TrimSpace(aLines[i]) != strings.TrimSpace(bLines[i]) {
return false
}
}
if c.Style != cn.Style {
return false
}
return true
}
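// Location is the source range covered by a node; StartPos is inclusive and
// EndPos is exclusive (see End).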
type Location struct {
StartPos Position
EndPos Position
}
func (l Location) MoveStartInLine(n int) Location {
newL := l
newL.StartPos.Col += n
newL.StartPos.Offset += n
return newL
}
func (l *Location) Pos() Position {
return l.StartPos
}
// the end col and offset are exclusive
func (l *Location) End() Position {
return l.EndPos
}
func (l *Location) Contains(pos Position) bool {
if l == nil {
return false
}
// TODO(jpf): ut
return (l.StartPos.Less(pos) || l.StartPos.Equal(pos)) && l.EndPos.Greater(pos)
}
func NewLocationFromPos(start, end Position) Location {
return Location{StartPos: start, EndPos: end}
}
func NewLocationFromCurrent(c *current) Location {
return NewLocation(c.pos, string(c.text))
}
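// NewLocation derives a node's Location from its start position and the matched
// text: the end line advances by the number of newlines in text, the end column
// is computed from the rune count of the last line, and the end offset is the
// start offset plus the byte length of text.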
func NewLocation(startPos position, text string) Location {
start := ConvertPosition(startPos)
nLine := strings.Count(text, "\n") // e.g. "\r\nline 1" starts at line 1, col 0 in the parsed ast, hence the adjustment below
if startPos.col == 0 {
nLine = nLine - 1
}
lastLineOffset := strings.LastIndexByte(text, '\n')
if lastLineOffset == -1 {
lastLineOffset = 0
}
lastLine := []byte(text)[lastLineOffset:]
col := utf8.RuneCount(lastLine) + 1
if nLine == 0 {
col += start.Col - 1
}
end := Position{
Line: start.Line + nLine,
Col: col,
Offset: start.Offset + len(text),
}
return Location{
StartPos: start,
EndPos: end,
}
}
var InvalidPosition = Position{
Line: -1,
Col: -1,
Offset: -1,
}
type Position struct {
Line int // 1-based line number
Col int // 1-based rune count from start of line.
Offset int // 0-based byte offset
}
func (p *Position) Less(other Position) bool {
if p.Line < other.Line {
return true
} else if p.Line == other.Line {
return p.Col < other.Col
}
return false
}
func (p *Position) Equal(other Position) bool {
return p.Line == other.Line && p.Col == other.Col
}
func (p *Position) Greater(other Position) bool {
if p.Line > other.Line {
return true
} else if p.Line == other.Line {
return p.Col > other.Col
}
return false
}
func (p *Position) Invalid() bool {
return p.Line < 1 || p.Col < 1 || p.Offset < 0
}
package parser
import (
"github.com/joyme123/thrift-ls/utils"
)
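// SearchNodePathByPosition returns the chain of nodes from root down to the
// innermost node whose Location contains pos, e.g. roughly
// [*Document, *Struct, *Field, *Identifier] when pos sits on a field name.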
func SearchNodePathByPosition(root Node, pos Position) []Node {
path := make([]Node, 0)
searchNodePath(root, pos, &path)
return path
}
func searchNodePath(root Node, pos Position, path *[]Node) {
if utils.IsNil(root) {
return
}
if !root.Contains(pos) {
return
}
*path = append(*path, root)
for _, child := range root.Children() {
searchNodePath(child, pos, path)
}
}
package parser
import (
"errors"
)
// ErrorLister is the public interface to access the inner errors
// included in an errList
type ErrorLister interface {
Errors() []error
}
func (e errList) Errors() []error {
return e
}
// ParserError is the public interface to errors of type parserError
type ParserError interface {
Error() string
InnerError() error
Pos() (int, int, int)
Expected() []string
}
func (p *parserError) InnerError() error {
return p.Inner
}
func (p *parserError) Pos() (line, col, offset int) {
return p.pos.line, p.pos.col, p.pos.offset
}
func (p *parserError) Expected() []string {
return p.expected
}
var (
RequiredError error = errors.New("expecting 'required' or 'optional'")
InvalidFieldTypeError error = errors.New("expecting a valid field type")
InvalidFieldIndexError error = errors.New("expecting a valid int16 field index")
InvalidStructError error = errors.New("expecting a valid struct definition")
InvalidStructIdentifierError error = errors.New("expecting a valid struct identifier")
InvalidStructBlockLCURError error = errors.New("expecting a starting '{' of struct block")
InvalidStructBlockRCURError error = errors.New("expecting an ending '}' of struct block")
InvalidStructFieldError error = errors.New("expecting a valid struct field")
InvalidUnionError error = errors.New("expecting a valid union definition")
InvalidUnionIdentifierError error = errors.New("expecting a valid union identifier")
InvalidUnionBlockLCURError error = errors.New("expecting a starting '{' of union block")
InvalidUnionBlockRCURError error = errors.New("expecting an ending '}' of union block")
InvalidUnionFieldError error = errors.New("expecting a valid union field")
InvalidExceptionError error = errors.New("expecting a valid exception definition")
InvalidExceptionIdentifierError error = errors.New("expecting a valid exception identifier")
InvalidExceptionBlockLCURError error = errors.New("expecting a starting '{' of exception block")
InvalidExceptionBlockRCURError error = errors.New("expecting an ending '}' of exception block")
InvalidExceptionFieldError error = errors.New("expecting a valid exception field")
InvalidEnumError error = errors.New("expecting a valid enum definition")
InvalidEnumIdentifierError error = errors.New("expecting a valid enum identifier")
InvalidEnumBlockLCURError error = errors.New("expecting a starting '{' of enum block")
InvalidEnumBlockRCURError error = errors.New("expecting an ending '}' of enum block")
InvalidEnumValueError error = errors.New("expecting a valid enum field")
InvalidEnumValueIntConstantError error = errors.New("expecting a valid int constant")
InvalidTypedefError error = errors.New("expecting a valid typedef definition")
InvalidTypedefIdentifierError error = errors.New("expecting a valid typedef identifier")
InvalidConstError error = errors.New("expecting a valid const definition")
InvalidConstConstValueError error = errors.New("expecting a valid const value")
InvalidConstMissingValueError error = errors.New("expecting a const value")
InvalidConstIdentifierError error = errors.New("expecting a valid const identifier")
InvalidServiceIdentifierError error = errors.New("expecting a valid service identifier")
InvalidServiceBlockRCURError error = errors.New("expecting an ending '}' of service block")
InvalidServiceFunctionError error = errors.New("expecting a valid service function")
InvalidFunctionIdentifierError error = errors.New("expecting a valid function identifier")
InvalidFunctionArgumentError error = errors.New("expecting a valid function argument")
InvalidIdentifierError error = errors.New("expecting a valid identifier")
InvalidLiteral1MissingRightError error = errors.New("expecting a right \" ")
InvalidLiteral1Error error = errors.New("expecting a valid literal")
InvalidLiteral2MissingRightError error = errors.New("expecting a right ' ")
InvalidLiteral2Error error = errors.New("expecting a valid literal")
InvalidHeaderError error = errors.New("expecting a valid header")
InvalidIncludeError error = errors.New("expecting a valid include header")
InvalidCppIncludeError error = errors.New("expecting a valid cpp include header")
InvalidNamespaceError error = errors.New("expecting a valid namespace header")
InvalidDefinitionError error = errors.New("expecting a valid definition")
InvalidServiceError error = errors.New("expecting a valid service definition")
)
package parser
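// IncludeCall resolves an include path found in a document and returns the
// resolved filename together with its content.
//
// A minimal sketch of a filesystem-based resolver (baseDir and the joining
// strategy are assumptions for illustration, not part of this package):
//
//	call := func(include string) (string, []byte, error) {
//		p := filepath.Join(baseDir, include)
//		content, err := os.ReadFile(p)
//		return p, content, err
//	}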
type IncludeCall func(include string) (filename string, content []byte, err error)
type Parser interface {
Parse(filename string, content []byte) *ParseResult
ParseRecursively(filename string, content []byte, maxDepth int, call IncludeCall) []*ParseResult
}
// PEGParser use PEG as a parser implementation
type PEGParser struct {
parsed map[string]struct{}
}
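// Parse parses a single thrift file and returns the Document (which may be
// partial) together with the individual parse errors, unwrapping an
// ErrorLister when the underlying parser reports one.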
func (p *PEGParser) Parse(filename string, content []byte) (*Document, []error) {
if p.parsed == nil {
p.parsed = make(map[string]struct{})
}
p.parsed[filename] = struct{}{}
doc, err := Parse(filename, content)
if err != nil {
var errors []error
errList, ok := err.(ErrorLister)
if ok {
errors = errList.Errors()
} else {
errors = append(errors, err)
}
var res *Document
if doc != nil {
res = doc.(*Document)
}
return res, errors
}
return doc.(*Document), nil
}
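// ParseRecursively parses filename and then, depth first, every include it can
// resolve through call. A maxDepth greater than zero limits the recursion depth;
// files already seen are parsed only once.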
func (p *PEGParser) ParseRecursively(filename string, content []byte, maxDepth int, call IncludeCall) []*ParseResult {
return p.parseRecursively(filename, content, 0, maxDepth, call)
}
func (p *PEGParser) parseRecursively(filename string, content []byte, curDepth int, maxDepth int, call IncludeCall) []*ParseResult {
if curDepth > maxDepth && maxDepth > 0 {
return nil
}
results := make([]*ParseResult, 0)
doc, errs := p.Parse(filename, content)
results = append(results, &ParseResult{
Doc: doc,
Errors: errs,
})
if doc != nil {
for _, include := range doc.Includes {
if include.Path == nil || include.Path.ChildrenBadNode() {
continue
}
f, c, err := call(include.Path.Value.Text)
if err != nil {
errs = append(errs, err)
continue
}
if _, ok := p.parsed[f]; ok {
continue
}
subRes := p.parseRecursively(f, c, curDepth+1, maxDepth, call)
results = append(results, subRes...)
}
}
return results
}
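// ParseResult couples the parsed Document of one file with the errors produced
// while parsing it.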
type ParseResult struct {
Doc *Document
Errors []error
}
package test
import "github.com/joyme123/thrift-ls/parser"
func containsError(errs []error, target error) bool {
for _, err := range errs {
err = err.(parser.ParserError).InnerError()
if err == target {
return true
}
}
return false
}
func equalErrors(errs []error, target []error) bool {
if len(errs) != len(target) {
return false
}
for i, err := range errs {
err = err.(parser.ParserError).InnerError()
if err != target[i] {
return false
}
}
return true
}
// Code generated by pigeon; DO NOT EDIT.
package parser
import (
"bytes"
"errors"
"fmt"
"io"
"math"
"os"
"sort"
"strconv"
"strings"
"sync"
"unicode"
"unicode/utf8"
)
func toStringSlice(strs any) []string {
if strs == nil {
return nil
}
items := strs.([]any)
ret := make([]string, 0, len(items))
for i := range items {
var item string
if _, ok := items[i].([]interface{}); ok {
data := items[i].([]interface{})[1]
if bs, ok := data.([]uint8); ok {
item = string(bs)
} else {
item = items[i].([]interface{})[1].(string)
}
} else {
item = items[i].(string)
}
ret = append(ret, item)
}
return ret
}
func toFieldSlice(fields any) []*Field {
if fields == nil {
return nil
}
items := fields.([]any)
ret := make([]*Field, 0, len(items))
for i := range items {
item := items[i].(*Field)
ret = append(ret, item)
}
return ret
}
func toConstValueSlice(values any) []*ConstValue {
if values == nil {
return nil
}
items := values.([]any)
ret := make([]*ConstValue, 0, len(items))
for i := range items {
item := items[i].(*ConstValue)
ret = append(ret, item)
}
return ret
}
func toString(text any) string {
if text == nil {
return ""
}
data := text.([]interface{})
ret := bytes.NewBuffer(nil)
for i := range data {
ret.WriteString(data[i].(string))
}
return ret.String()
}
func toAnnotationSlice(annos any) []*Annotation {
if annos == nil {
return nil
}
items := annos.([]any)
ret := make([]*Annotation, 0, len(items))
for i := range items {
ret = append(ret, items[i].(*Annotation))
}
return ret
}
func toHeaderSlice(headers any) []Header {
if headers == nil {
return nil
}
items := headers.([]any)
ret := make([]Header, 0, len(items))
for i := range items {
ret = append(ret, items[i].(Header))
}
return ret
}
func toDefinitionSlice(defs any) []Definition {
if defs == nil {
return nil
}
items := defs.([]any)
ret := make([]Definition, 0, len(items))
for i := range items {
ret = append(ret, items[i].(Definition))
}
return ret
}
func toEnumValueSlice(v any) []*EnumValue {
if v == nil {
return nil
}
values := v.([]any)
ret := make([]*EnumValue, 0, len(values))
value := int64(0)
for i := range values {
enumV := values[i].(*EnumValue)
if enumV.ValueNode == nil {
enumV.Value = value
} else {
value = enumV.Value
}
value++
ret = append(ret, enumV)
}
return ret
}
func toFunctionSlice(fns any) []*Function {
if fns == nil {
return nil
}
items := fns.([]any)
ret := make([]*Function, 0, len(items))
for i := range items {
ret = append(ret, items[i].(*Function))
}
return ret
}
func toCommentSlice(comments any) []*Comment {
if comments == nil {
return nil
}
items := comments.([]any)
ret := make([]*Comment, 0, len(items))
for i := range items {
item := items[i]
commentItem, ok := item.(*Comment)
if ok {
ret = append(ret, commentItem)
}
}
if len(ret) == 0 {
return nil
}
return ret
}
func toAnnotations(annos any) *Annotations {
if annos == nil {
return nil
}
return annos.(*Annotations)
}
func toListSeparatorKeyword(sep any) *ListSeparatorKeyword {
if sep == nil {
return nil
}
return sep.(*ListSeparatorKeyword)
}
var g = &grammar{
rules: []*rule{
{
name: "Document",
pos: position{line: 182, col: 1, offset: 3075},
expr: &recoveryExpr{
pos: position{line: 182, col: 12, offset: 3086},
expr: &recoveryExpr{
pos: position{line: 182, col: 12, offset: 3086},
expr: &actionExpr{
pos: position{line: 182, col: 12, offset: 3086},
run: (*parser).callonDocument3,
expr: &seqExpr{
pos: position{line: 182, col: 12, offset: 3086},
exprs: []any{
&labeledExpr{
pos: position{line: 182, col: 12, offset: 3086},
label: "headers",
expr: &zeroOrMoreExpr{
pos: position{line: 182, col: 20, offset: 3094},
expr: &ruleRefExpr{
pos: position{line: 182, col: 20, offset: 3094},
name: "Header",
},
},
},
&labeledExpr{
pos: position{line: 182, col: 29, offset: 3103},
label: "defs",
expr: &zeroOrMoreExpr{
pos: position{line: 182, col: 34, offset: 3108},
expr: &ruleRefExpr{
pos: position{line: 182, col: 34, offset: 3108},
name: "Definition",
},
},
},
&labeledExpr{
pos: position{line: 182, col: 46, offset: 3120},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 182, col: 55, offset: 3129},
name: "ReservedComments",
},
},
¬Expr{
pos: position{line: 182, col: 72, offset: 3146},
expr: &anyMatcher{
line: 182, col: 73, offset: 3147,
},
},
},
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 184, col: 17, offset: 3291},
name: "ErrHeader",
},
failureLabel: []string{
"errHeader",
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 184, col: 45, offset: 3319},
name: "ErrDefinition",
},
failureLabel: []string{
"errDefinition",
},
},
},
{
name: "Header",
pos: position{line: 186, col: 1, offset: 3334},
expr: &recoveryExpr{
pos: position{line: 186, col: 10, offset: 3343},
expr: &recoveryExpr{
pos: position{line: 186, col: 10, offset: 3343},
expr: &recoveryExpr{
pos: position{line: 186, col: 10, offset: 3343},
expr: &choiceExpr{
pos: position{line: 186, col: 10, offset: 3343},
alternatives: []any{
&actionExpr{
pos: position{line: 186, col: 10, offset: 3343},
run: (*parser).callonHeader5,
expr: &seqExpr{
pos: position{line: 186, col: 10, offset: 3343},
exprs: []any{
&labeledExpr{
pos: position{line: 186, col: 10, offset: 3343},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 186, col: 19, offset: 3352},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 186, col: 36, offset: 3369},
label: "v",
expr: &choiceExpr{
pos: position{line: 186, col: 39, offset: 3372},
alternatives: []any{
&ruleRefExpr{
pos: position{line: 186, col: 39, offset: 3372},
name: "Include",
},
&ruleRefExpr{
pos: position{line: 186, col: 49, offset: 3382},
name: "CppInclude",
},
&ruleRefExpr{
pos: position{line: 186, col: 62, offset: 3395},
name: "Namespace",
},
},
},
},
&labeledExpr{
pos: position{line: 186, col: 73, offset: 3406},
label: "endLineComments",
expr: &ruleRefExpr{
pos: position{line: 186, col: 89, offset: 3422},
name: "ReservedEndLineComments",
},
},
},
},
},
&actionExpr{
pos: position{line: 190, col: 5, offset: 3579},
run: (*parser).callonHeader16,
expr: &labeledExpr{
pos: position{line: 190, col: 5, offset: 3579},
label: "x",
expr: &seqExpr{
pos: position{line: 190, col: 8, offset: 3582},
exprs: []any{
¬Expr{
pos: position{line: 190, col: 8, offset: 3582},
expr: &ruleRefExpr{
pos: position{line: 190, col: 10, offset: 3584},
name: "Definition",
},
},
&ruleRefExpr{
pos: position{line: 190, col: 22, offset: 3596},
name: "ReservedComments",
},
&andExpr{
pos: position{line: 190, col: 39, offset: 3613},
expr: &oneOrMoreExpr{
pos: position{line: 190, col: 41, offset: 3615},
expr: &anyMatcher{
line: 190, col: 41, offset: 3615,
},
},
},
&andCodeExpr{
pos: position{line: 190, col: 45, offset: 3619},
run: (*parser).callonHeader25,
},
&throwExpr{
pos: position{line: 196, col: 3, offset: 3820},
label: "errHeader",
},
},
},
},
},
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 200, col: 18, offset: 3985},
name: "ErrInclude",
},
failureLabel: []string{
"errInclude",
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 200, col: 47, offset: 4014},
name: "ErrorCppInclude",
},
failureLabel: []string{
"errCppInclude",
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 200, col: 80, offset: 4047},
name: "ErrorNamespace",
},
failureLabel: []string{
"errNamespace",
},
},
},
{
name: "Include",
pos: position{line: 202, col: 1, offset: 4063},
expr: &choiceExpr{
pos: position{line: 202, col: 11, offset: 4073},
alternatives: []any{
&actionExpr{
pos: position{line: 202, col: 11, offset: 4073},
run: (*parser).callonInclude2,
expr: &seqExpr{
pos: position{line: 202, col: 11, offset: 4073},
exprs: []any{
&labeledExpr{
pos: position{line: 202, col: 11, offset: 4073},
label: "includeKeyword",
expr: &ruleRefExpr{
pos: position{line: 202, col: 26, offset: 4088},
name: "INCLUDE",
},
},
&labeledExpr{
pos: position{line: 202, col: 34, offset: 4096},
label: "include",
expr: &ruleRefExpr{
pos: position{line: 202, col: 42, offset: 4104},
name: "Literal",
},
},
},
},
},
&actionExpr{
pos: position{line: 208, col: 5, offset: 4313},
run: (*parser).callonInclude8,
expr: &labeledExpr{
pos: position{line: 208, col: 5, offset: 4313},
label: "x",
expr: &seqExpr{
pos: position{line: 208, col: 8, offset: 4316},
exprs: []any{
&andExpr{
pos: position{line: 208, col: 8, offset: 4316},
expr: &seqExpr{
pos: position{line: 208, col: 10, offset: 4318},
exprs: []any{
&ruleRefExpr{
pos: position{line: 208, col: 10, offset: 4318},
name: "INCLUDE",
},
&zeroOrMoreExpr{
pos: position{line: 208, col: 18, offset: 4326},
expr: &anyMatcher{
line: 208, col: 18, offset: 4326,
},
},
},
},
},
&throwExpr{
pos: position{line: 208, col: 22, offset: 4330},
label: "errInclude",
},
},
},
},
},
},
},
},
{
name: "CppInclude",
pos: position{line: 213, col: 1, offset: 4377},
expr: &choiceExpr{
pos: position{line: 213, col: 15, offset: 4391},
alternatives: []any{
&actionExpr{
pos: position{line: 213, col: 15, offset: 4391},
run: (*parser).callonCppInclude2,
expr: &seqExpr{
pos: position{line: 213, col: 15, offset: 4391},
exprs: []any{
&labeledExpr{
pos: position{line: 213, col: 15, offset: 4391},
label: "cppIncludeKeyword",
expr: &ruleRefExpr{
pos: position{line: 213, col: 33, offset: 4409},
name: "CPPINCLUDE",
},
},
&labeledExpr{
pos: position{line: 213, col: 44, offset: 4420},
label: "include",
expr: &ruleRefExpr{
pos: position{line: 213, col: 52, offset: 4428},
name: "Literal",
},
},
},
},
},
&actionExpr{
pos: position{line: 219, col: 5, offset: 4646},
run: (*parser).callonCppInclude8,
expr: &labeledExpr{
pos: position{line: 219, col: 5, offset: 4646},
label: "x",
expr: &seqExpr{
pos: position{line: 219, col: 8, offset: 4649},
exprs: []any{
&andExpr{
pos: position{line: 219, col: 8, offset: 4649},
expr: &seqExpr{
pos: position{line: 219, col: 10, offset: 4651},
exprs: []any{
&ruleRefExpr{
pos: position{line: 219, col: 10, offset: 4651},
name: "CPPINCLUDE",
},
&zeroOrMoreExpr{
pos: position{line: 219, col: 21, offset: 4662},
expr: &anyMatcher{
line: 219, col: 21, offset: 4662,
},
},
},
},
},
&throwExpr{
pos: position{line: 219, col: 25, offset: 4666},
label: "errCppInclude",
},
},
},
},
},
},
},
},
{
name: "Namespace",
pos: position{line: 224, col: 1, offset: 4716},
expr: &choiceExpr{
pos: position{line: 224, col: 14, offset: 4729},
alternatives: []any{
&actionExpr{
pos: position{line: 224, col: 14, offset: 4729},
run: (*parser).callonNamespace2,
expr: &seqExpr{
pos: position{line: 224, col: 14, offset: 4729},
exprs: []any{
&labeledExpr{
pos: position{line: 224, col: 14, offset: 4729},
label: "namespaceKeyword",
expr: &ruleRefExpr{
pos: position{line: 224, col: 31, offset: 4746},
name: "NAMESPACE",
},
},
&labeledExpr{
pos: position{line: 224, col: 41, offset: 4756},
label: "language",
expr: &ruleRefExpr{
pos: position{line: 224, col: 50, offset: 4765},
name: "NamespaceScope",
},
},
&labeledExpr{
pos: position{line: 224, col: 65, offset: 4780},
label: "name",
expr: &ruleRefExpr{
pos: position{line: 224, col: 70, offset: 4785},
name: "Identifier",
},
},
&labeledExpr{
pos: position{line: 224, col: 81, offset: 4796},
label: "annotations",
expr: &zeroOrOneExpr{
pos: position{line: 224, col: 93, offset: 4808},
expr: &ruleRefExpr{
pos: position{line: 224, col: 93, offset: 4808},
name: "Annotations",
},
},
},
},
},
},
&actionExpr{
pos: position{line: 226, col: 5, offset: 4994},
run: (*parser).callonNamespace13,
expr: &labeledExpr{
pos: position{line: 226, col: 5, offset: 4994},
label: "x",
expr: &seqExpr{
pos: position{line: 226, col: 8, offset: 4997},
exprs: []any{
&andExpr{
pos: position{line: 226, col: 8, offset: 4997},
expr: &seqExpr{
pos: position{line: 226, col: 10, offset: 4999},
exprs: []any{
&ruleRefExpr{
pos: position{line: 226, col: 10, offset: 4999},
name: "NAMESPACE",
},
&zeroOrMoreExpr{
pos: position{line: 226, col: 20, offset: 5009},
expr: &anyMatcher{
line: 226, col: 20, offset: 5009,
},
},
},
},
},
&throwExpr{
pos: position{line: 226, col: 24, offset: 5013},
label: "errNamespace",
},
},
},
},
},
},
},
},
{
name: "NamespaceScope",
pos: position{line: 230, col: 1, offset: 5061},
expr: &actionExpr{
pos: position{line: 230, col: 19, offset: 5079},
run: (*parser).callonNamespaceScope1,
expr: &labeledExpr{
pos: position{line: 230, col: 19, offset: 5079},
label: "v",
expr: &choiceExpr{
pos: position{line: 230, col: 22, offset: 5082},
alternatives: []any{
&ruleRefExpr{
pos: position{line: 230, col: 22, offset: 5082},
name: "NamespaceScopeAny",
},
&ruleRefExpr{
pos: position{line: 230, col: 42, offset: 5102},
name: "Identifier",
},
},
},
},
},
},
{
name: "NamespaceScopeAny",
pos: position{line: 239, col: 1, offset: 5207},
expr: &actionExpr{
pos: position{line: 239, col: 21, offset: 5227},
run: (*parser).callonNamespaceScopeAny1,
expr: &seqExpr{
pos: position{line: 239, col: 21, offset: 5227},
exprs: []any{
&labeledExpr{
pos: position{line: 239, col: 21, offset: 5227},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 239, col: 30, offset: 5236},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 239, col: 47, offset: 5253},
label: "idName",
expr: &ruleRefExpr{
pos: position{line: 239, col: 54, offset: 5260},
name: "NamespaceScopeAnyToken",
},
},
&zeroOrMoreExpr{
pos: position{line: 239, col: 77, offset: 5283},
expr: &ruleRefExpr{
pos: position{line: 239, col: 77, offset: 5283},
name: "Indent",
},
},
},
},
},
},
{
name: "NamespaceScopeAnyToken",
pos: position{line: 243, col: 1, offset: 5399},
expr: &actionExpr{
pos: position{line: 243, col: 26, offset: 5424},
run: (*parser).callonNamespaceScopeAnyToken1,
expr: &litMatcher{
pos: position{line: 243, col: 26, offset: 5424},
val: "*",
ignoreCase: false,
want: "\"*\"",
},
},
},
{
name: "Definition",
pos: position{line: 247, col: 1, offset: 5496},
expr: &recoveryExpr{
pos: position{line: 247, col: 14, offset: 5509},
expr: &recoveryExpr{
pos: position{line: 247, col: 14, offset: 5509},
expr: &recoveryExpr{
pos: position{line: 247, col: 14, offset: 5509},
expr: &recoveryExpr{
pos: position{line: 247, col: 14, offset: 5509},
expr: &recoveryExpr{
pos: position{line: 247, col: 14, offset: 5509},
expr: &recoveryExpr{
pos: position{line: 247, col: 14, offset: 5509},
expr: &recoveryExpr{
pos: position{line: 247, col: 14, offset: 5509},
expr: &choiceExpr{
pos: position{line: 247, col: 14, offset: 5509},
alternatives: []any{
&actionExpr{
pos: position{line: 247, col: 14, offset: 5509},
run: (*parser).callonDefinition9,
expr: &seqExpr{
pos: position{line: 247, col: 14, offset: 5509},
exprs: []any{
&labeledExpr{
pos: position{line: 247, col: 14, offset: 5509},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 247, col: 23, offset: 5518},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 247, col: 40, offset: 5535},
label: "v",
expr: &choiceExpr{
pos: position{line: 247, col: 43, offset: 5538},
alternatives: []any{
&ruleRefExpr{
pos: position{line: 247, col: 43, offset: 5538},
name: "Const",
},
&ruleRefExpr{
pos: position{line: 247, col: 51, offset: 5546},
name: "Typedef",
},
&ruleRefExpr{
pos: position{line: 247, col: 61, offset: 5556},
name: "Enum",
},
&ruleRefExpr{
pos: position{line: 247, col: 68, offset: 5563},
name: "Service",
},
&ruleRefExpr{
pos: position{line: 247, col: 78, offset: 5573},
name: "Struct",
},
&ruleRefExpr{
pos: position{line: 247, col: 87, offset: 5582},
name: "Union",
},
&ruleRefExpr{
pos: position{line: 247, col: 95, offset: 5590},
name: "Exception",
},
},
},
},
&labeledExpr{
pos: position{line: 247, col: 106, offset: 5601},
label: "annos",
expr: &zeroOrOneExpr{
pos: position{line: 247, col: 112, offset: 5607},
expr: &ruleRefExpr{
pos: position{line: 247, col: 112, offset: 5607},
name: "Annotations",
},
},
},
&labeledExpr{
pos: position{line: 247, col: 125, offset: 5620},
label: "endLineComments",
expr: &ruleRefExpr{
pos: position{line: 247, col: 141, offset: 5636},
name: "ReservedEndLineComments",
},
},
},
},
},
&actionExpr{
pos: position{line: 254, col: 5, offset: 5901},
run: (*parser).callonDefinition27,
expr: &labeledExpr{
pos: position{line: 254, col: 5, offset: 5901},
label: "x",
expr: &seqExpr{
pos: position{line: 254, col: 8, offset: 5904},
exprs: []any{
&ruleRefExpr{
pos: position{line: 254, col: 8, offset: 5904},
name: "ReservedComments",
},
&andExpr{
pos: position{line: 254, col: 25, offset: 5921},
expr: &oneOrMoreExpr{
pos: position{line: 254, col: 27, offset: 5923},
expr: &anyMatcher{
line: 254, col: 27, offset: 5923,
},
},
},
&andCodeExpr{
pos: position{line: 254, col: 31, offset: 5927},
run: (*parser).callonDefinition34,
},
&throwExpr{
pos: position{line: 260, col: 3, offset: 6127},
label: "errDefinition",
},
},
},
},
},
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 263, col: 16, offset: 6261},
name: "ErrConst",
},
failureLabel: []string{
"errConst",
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 263, col: 40, offset: 6285},
name: "ErrTypedef",
},
failureLabel: []string{
"errTypedef",
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 263, col: 63, offset: 6308},
name: "ErrEnum",
},
failureLabel: []string{
"errEnum",
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 263, col: 86, offset: 6331},
name: "ErrService",
},
failureLabel: []string{
"errService",
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 263, col: 111, offset: 6356},
name: "ErrStruct",
},
failureLabel: []string{
"errStruct",
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 263, col: 134, offset: 6379},
name: "ErrUnion",
},
failureLabel: []string{
"errUnion",
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 263, col: 160, offset: 6405},
name: "ErrException",
},
failureLabel: []string{
"errException",
},
},
},
{
name: "Const",
pos: position{line: 265, col: 1, offset: 6419},
expr: &recoveryExpr{
pos: position{line: 265, col: 9, offset: 6427},
expr: &recoveryExpr{
pos: position{line: 265, col: 9, offset: 6427},
expr: &recoveryExpr{
pos: position{line: 265, col: 9, offset: 6427},
expr: &choiceExpr{
pos: position{line: 265, col: 9, offset: 6427},
alternatives: []any{
&actionExpr{
pos: position{line: 265, col: 9, offset: 6427},
run: (*parser).callonConst5,
expr: &seqExpr{
pos: position{line: 265, col: 9, offset: 6427},
exprs: []any{
&labeledExpr{
pos: position{line: 265, col: 9, offset: 6427},
label: "constKeyword",
expr: &ruleRefExpr{
pos: position{line: 265, col: 22, offset: 6440},
name: "CONST",
},
},
&labeledExpr{
pos: position{line: 265, col: 28, offset: 6446},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 265, col: 30, offset: 6448},
name: "FieldType",
},
},
&labeledExpr{
pos: position{line: 265, col: 40, offset: 6458},
label: "name",
expr: &ruleRefExpr{
pos: position{line: 265, col: 45, offset: 6463},
name: "DefinitionIdentifier",
},
},
&labeledExpr{
pos: position{line: 265, col: 66, offset: 6484},
label: "v",
expr: &ruleRefExpr{
pos: position{line: 265, col: 68, offset: 6486},
name: "ConstEqualValue",
},
},
&labeledExpr{
pos: position{line: 265, col: 84, offset: 6502},
label: "sep",
expr: &zeroOrOneExpr{
pos: position{line: 265, col: 88, offset: 6506},
expr: &ruleRefExpr{
pos: position{line: 265, col: 88, offset: 6506},
name: "ListSeparator",
},
},
},
},
},
},
&actionExpr{
pos: position{line: 268, col: 5, offset: 6765},
run: (*parser).callonConst18,
expr: &labeledExpr{
pos: position{line: 268, col: 5, offset: 6765},
label: "x",
expr: &seqExpr{
pos: position{line: 268, col: 8, offset: 6768},
exprs: []any{
&andExpr{
pos: position{line: 268, col: 8, offset: 6768},
expr: &seqExpr{
pos: position{line: 268, col: 10, offset: 6770},
exprs: []any{
&ruleRefExpr{
pos: position{line: 268, col: 10, offset: 6770},
name: "CONST",
},
&zeroOrMoreExpr{
pos: position{line: 268, col: 16, offset: 6776},
expr: &anyMatcher{
line: 268, col: 16, offset: 6776,
},
},
},
},
},
&throwExpr{
pos: position{line: 268, col: 20, offset: 6780},
label: "errConst",
},
},
},
},
},
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 270, col: 21, offset: 6841},
name: "ErrConstIdentifier",
},
failureLabel: []string{
"errIdentifier",
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 270, col: 65, offset: 6885},
name: "ErrConstMissingValue",
},
failureLabel: []string{
"errConstMissingValue",
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 270, col: 109, offset: 6929},
name: "ErrConstConstValue",
},
failureLabel: []string{
"errConstConstValue",
},
},
},
{
name: "ConstEqualValue",
pos: position{line: 272, col: 1, offset: 6949},
expr: &choiceExpr{
pos: position{line: 272, col: 19, offset: 6967},
alternatives: []any{
&actionExpr{
pos: position{line: 272, col: 19, offset: 6967},
run: (*parser).callonConstEqualValue2,
expr: &labeledExpr{
pos: position{line: 272, col: 19, offset: 6967},
label: "v",
expr: &seqExpr{
pos: position{line: 272, col: 22, offset: 6970},
exprs: []any{
&ruleRefExpr{
pos: position{line: 272, col: 22, offset: 6970},
name: "EQUAL",
},
&ruleRefExpr{
pos: position{line: 272, col: 28, offset: 6976},
name: "ConstValue",
},
},
},
},
},
&actionExpr{
pos: position{line: 274, col: 5, offset: 7009},
run: (*parser).callonConstEqualValue7,
expr: &labeledExpr{
pos: position{line: 274, col: 5, offset: 7009},
label: "x",
expr: &seqExpr{
pos: position{line: 274, col: 8, offset: 7012},
exprs: []any{
¬Expr{
pos: position{line: 274, col: 8, offset: 7012},
expr: &ruleRefExpr{
pos: position{line: 274, col: 9, offset: 7013},
name: "EQUAL",
},
},
&throwExpr{
pos: position{line: 274, col: 15, offset: 7019},
label: "errConstMissingValue",
},
},
},
},
},
&actionExpr{
pos: position{line: 276, col: 5, offset: 7105},
run: (*parser).callonConstEqualValue13,
expr: &labeledExpr{
pos: position{line: 276, col: 5, offset: 7105},
label: "x",
expr: &seqExpr{
pos: position{line: 276, col: 8, offset: 7108},
exprs: []any{
&ruleRefExpr{
pos: position{line: 276, col: 8, offset: 7108},
name: "EQUAL",
},
&throwExpr{
pos: position{line: 276, col: 14, offset: 7114},
label: "errConstConstValue",
},
},
},
},
},
},
},
},
{
name: "Typedef",
pos: position{line: 280, col: 1, offset: 7157},
expr: &recoveryExpr{
pos: position{line: 280, col: 11, offset: 7167},
expr: &choiceExpr{
pos: position{line: 280, col: 11, offset: 7167},
alternatives: []any{
&actionExpr{
pos: position{line: 280, col: 11, offset: 7167},
run: (*parser).callonTypedef3,
expr: &seqExpr{
pos: position{line: 280, col: 11, offset: 7167},
exprs: []any{
&labeledExpr{
pos: position{line: 280, col: 11, offset: 7167},
label: "typedefKeyword",
expr: &ruleRefExpr{
pos: position{line: 280, col: 26, offset: 7182},
name: "TYPEDEF",
},
},
&labeledExpr{
pos: position{line: 280, col: 34, offset: 7190},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 280, col: 36, offset: 7192},
name: "FieldType",
},
},
&labeledExpr{
pos: position{line: 280, col: 46, offset: 7202},
label: "alias",
expr: &ruleRefExpr{
pos: position{line: 280, col: 52, offset: 7208},
name: "DefinitionIdentifier",
},
},
},
},
},
&actionExpr{
pos: position{line: 282, col: 5, offset: 7357},
run: (*parser).callonTypedef11,
expr: &labeledExpr{
pos: position{line: 282, col: 5, offset: 7357},
label: "x",
expr: &seqExpr{
pos: position{line: 282, col: 8, offset: 7360},
exprs: []any{
&andExpr{
pos: position{line: 282, col: 8, offset: 7360},
expr: &seqExpr{
pos: position{line: 282, col: 10, offset: 7362},
exprs: []any{
&ruleRefExpr{
pos: position{line: 282, col: 10, offset: 7362},
name: "TYPEDEF",
},
&zeroOrMoreExpr{
pos: position{line: 282, col: 18, offset: 7370},
expr: &anyMatcher{
line: 282, col: 18, offset: 7370,
},
},
},
},
},
&throwExpr{
pos: position{line: 282, col: 22, offset: 7374},
label: "errTypedef",
},
},
},
},
},
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 284, col: 21, offset: 7437},
name: "ErrTypedefIdentifier",
},
failureLabel: []string{
"errIdentifier",
},
},
},
{
name: "Enum",
pos: position{line: 286, col: 1, offset: 7459},
expr: &recoveryExpr{
pos: position{line: 286, col: 8, offset: 7466},
expr: &recoveryExpr{
pos: position{line: 286, col: 8, offset: 7466},
expr: &recoveryExpr{
pos: position{line: 286, col: 8, offset: 7466},
expr: &choiceExpr{
pos: position{line: 286, col: 8, offset: 7466},
alternatives: []any{
&actionExpr{
pos: position{line: 286, col: 8, offset: 7466},
run: (*parser).callonEnum5,
expr: &seqExpr{
pos: position{line: 286, col: 8, offset: 7466},
exprs: []any{
&labeledExpr{
pos: position{line: 286, col: 8, offset: 7466},
label: "enum",
expr: &ruleRefExpr{
pos: position{line: 286, col: 13, offset: 7471},
name: "ENUM",
},
},
&labeledExpr{
pos: position{line: 286, col: 18, offset: 7476},
label: "name",
expr: &ruleRefExpr{
pos: position{line: 286, col: 23, offset: 7481},
name: "DefinitionIdentifier",
},
},
&labeledExpr{
pos: position{line: 286, col: 44, offset: 7502},
label: "lcur",
expr: &ruleRefExpr{
pos: position{line: 286, col: 49, offset: 7507},
name: "LCUR",
},
},
&labeledExpr{
pos: position{line: 286, col: 54, offset: 7512},
label: "v",
expr: &zeroOrMoreExpr{
pos: position{line: 286, col: 56, offset: 7514},
expr: &ruleRefExpr{
pos: position{line: 286, col: 56, offset: 7514},
name: "EnumValueLine",
},
},
},
&labeledExpr{
pos: position{line: 286, col: 71, offset: 7529},
label: "rcur",
expr: &ruleRefExpr{
pos: position{line: 286, col: 76, offset: 7534},
name: "RCUR",
},
},
},
},
},
&actionExpr{
pos: position{line: 289, col: 5, offset: 7715},
run: (*parser).callonEnum18,
expr: &labeledExpr{
pos: position{line: 289, col: 5, offset: 7715},
label: "x",
expr: &seqExpr{
pos: position{line: 289, col: 8, offset: 7718},
exprs: []any{
&andExpr{
pos: position{line: 289, col: 8, offset: 7718},
expr: &seqExpr{
pos: position{line: 289, col: 10, offset: 7720},
exprs: []any{
&ruleRefExpr{
pos: position{line: 289, col: 10, offset: 7720},
name: "ENUM",
},
&zeroOrMoreExpr{
pos: position{line: 289, col: 15, offset: 7725},
expr: &anyMatcher{
line: 289, col: 15, offset: 7725,
},
},
},
},
},
&throwExpr{
pos: position{line: 289, col: 19, offset: 7729},
label: "errEnum",
},
},
},
},
},
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 291, col: 21, offset: 7789},
name: "ErrEnumIdentifier",
},
failureLabel: []string{
"errIdentifier",
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 291, col: 51, offset: 7819},
name: "ErrEnumRCUR",
},
failureLabel: []string{
"errRCUR",
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 291, col: 80, offset: 7848},
name: "ErrEnumValue",
},
failureLabel: []string{
"errEnumValue",
},
},
},
{
name: "EnumValueLine",
pos: position{line: 293, col: 1, offset: 7862},
expr: &actionExpr{
pos: position{line: 293, col: 17, offset: 7878},
run: (*parser).callonEnumValueLine1,
expr: &seqExpr{
pos: position{line: 293, col: 17, offset: 7878},
exprs: []any{
&labeledExpr{
pos: position{line: 293, col: 17, offset: 7878},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 293, col: 26, offset: 7887},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 293, col: 43, offset: 7904},
label: "v",
expr: &ruleRefExpr{
pos: position{line: 293, col: 45, offset: 7906},
name: "EnumValue",
},
},
&labeledExpr{
pos: position{line: 293, col: 55, offset: 7916},
label: "endLineComments",
expr: &ruleRefExpr{
pos: position{line: 293, col: 71, offset: 7932},
name: "ReservedEndLineComments",
},
},
},
},
},
},
{
name: "EnumValue",
pos: position{line: 298, col: 1, offset: 8064},
expr: &recoveryExpr{
pos: position{line: 298, col: 14, offset: 8077},
expr: &actionExpr{
pos: position{line: 298, col: 14, offset: 8077},
run: (*parser).callonEnumValue2,
expr: &seqExpr{
pos: position{line: 298, col: 14, offset: 8077},
exprs: []any{
&labeledExpr{
pos: position{line: 298, col: 14, offset: 8077},
label: "name",
expr: &ruleRefExpr{
pos: position{line: 298, col: 19, offset: 8082},
name: "Identifier",
},
},
&labeledExpr{
pos: position{line: 298, col: 30, offset: 8093},
label: "value",
expr: &zeroOrOneExpr{
pos: position{line: 298, col: 36, offset: 8099},
expr: &ruleRefExpr{
pos: position{line: 298, col: 37, offset: 8100},
name: "EnumValueIntConstant",
},
},
},
&labeledExpr{
pos: position{line: 298, col: 60, offset: 8123},
label: "annos",
expr: &zeroOrOneExpr{
pos: position{line: 298, col: 66, offset: 8129},
expr: &ruleRefExpr{
pos: position{line: 298, col: 66, offset: 8129},
name: "Annotations",
},
},
},
&labeledExpr{
pos: position{line: 298, col: 79, offset: 8142},
label: "sep",
expr: &zeroOrOneExpr{
pos: position{line: 298, col: 83, offset: 8146},
expr: &ruleRefExpr{
pos: position{line: 298, col: 83, offset: 8146},
name: "ListSeparator",
},
},
},
},
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 310, col: 22, offset: 8603},
name: "ErrEnumValueIntConstant",
},
failureLabel: []string{
"errIntConstant",
},
},
},
{
name: "Service",
pos: position{line: 312, col: 1, offset: 8628},
expr: &recoveryExpr{
pos: position{line: 312, col: 11, offset: 8638},
expr: &recoveryExpr{
pos: position{line: 312, col: 11, offset: 8638},
expr: &recoveryExpr{
pos: position{line: 312, col: 11, offset: 8638},
expr: &choiceExpr{
pos: position{line: 312, col: 11, offset: 8638},
alternatives: []any{
&actionExpr{
pos: position{line: 312, col: 11, offset: 8638},
run: (*parser).callonService5,
expr: &seqExpr{
pos: position{line: 312, col: 11, offset: 8638},
exprs: []any{
&labeledExpr{
pos: position{line: 312, col: 11, offset: 8638},
label: "svc",
expr: &ruleRefExpr{
pos: position{line: 312, col: 15, offset: 8642},
name: "SERVICE",
},
},
&labeledExpr{
pos: position{line: 312, col: 23, offset: 8650},
label: "name",
expr: &ruleRefExpr{
pos: position{line: 312, col: 28, offset: 8655},
name: "DefinitionIdentifier",
},
},
&labeledExpr{
pos: position{line: 312, col: 49, offset: 8676},
label: "extends",
expr: &zeroOrOneExpr{
pos: position{line: 312, col: 57, offset: 8684},
expr: &seqExpr{
pos: position{line: 312, col: 59, offset: 8686},
exprs: []any{
&ruleRefExpr{
pos: position{line: 312, col: 59, offset: 8686},
name: "EXTENDS",
},
&ruleRefExpr{
pos: position{line: 312, col: 67, offset: 8694},
name: "Identifier",
},
},
},
},
},
&labeledExpr{
pos: position{line: 312, col: 81, offset: 8708},
label: "lcur",
expr: &ruleRefExpr{
pos: position{line: 312, col: 86, offset: 8713},
name: "LCUR",
},
},
&labeledExpr{
pos: position{line: 312, col: 91, offset: 8718},
label: "fns",
expr: &zeroOrMoreExpr{
pos: position{line: 312, col: 95, offset: 8722},
expr: &ruleRefExpr{
pos: position{line: 312, col: 95, offset: 8722},
name: "Function",
},
},
},
&labeledExpr{
pos: position{line: 312, col: 105, offset: 8732},
label: "rcur",
expr: &ruleRefExpr{
pos: position{line: 312, col: 110, offset: 8737},
name: "RCUR",
},
},
},
},
},
&actionExpr{
pos: position{line: 321, col: 5, offset: 9144},
run: (*parser).callonService23,
expr: &labeledExpr{
pos: position{line: 321, col: 5, offset: 9144},
label: "x",
expr: &seqExpr{
pos: position{line: 321, col: 8, offset: 9147},
exprs: []any{
&andExpr{
pos: position{line: 321, col: 8, offset: 9147},
expr: &seqExpr{
pos: position{line: 321, col: 10, offset: 9149},
exprs: []any{
&ruleRefExpr{
pos: position{line: 321, col: 10, offset: 9149},
name: "SERVICE",
},
&zeroOrMoreExpr{
pos: position{line: 321, col: 18, offset: 9157},
expr: &anyMatcher{
line: 321, col: 18, offset: 9157,
},
},
},
},
},
&throwExpr{
pos: position{line: 321, col: 22, offset: 9161},
label: "errService",
},
},
},
},
},
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 323, col: 21, offset: 9224},
name: "ErrServiceIdentifier",
},
failureLabel: []string{
"errIdentifier",
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 323, col: 54, offset: 9257},
name: "ErrServiceRCUR",
},
failureLabel: []string{
"errRCUR",
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 323, col: 85, offset: 9288},
name: "ErrServiceFunction",
},
failureLabel: []string{
"errFunction",
},
},
},
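// Struct: STRUCT name "{" FieldWithThrow* "}", with error recovery for a
// missing identifier, a missing "}", or a bad field.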
{
name: "Struct",
pos: position{line: 325, col: 1, offset: 9309},
expr: &recoveryExpr{
pos: position{line: 325, col: 10, offset: 9318},
expr: &recoveryExpr{
pos: position{line: 325, col: 10, offset: 9318},
expr: &recoveryExpr{
pos: position{line: 325, col: 10, offset: 9318},
expr: &choiceExpr{
pos: position{line: 325, col: 10, offset: 9318},
alternatives: []any{
&actionExpr{
pos: position{line: 325, col: 10, offset: 9318},
run: (*parser).callonStruct5,
expr: &seqExpr{
pos: position{line: 325, col: 10, offset: 9318},
exprs: []any{
&labeledExpr{
pos: position{line: 325, col: 10, offset: 9318},
label: "st",
expr: &ruleRefExpr{
pos: position{line: 325, col: 13, offset: 9321},
name: "STRUCT",
},
},
&labeledExpr{
pos: position{line: 325, col: 20, offset: 9328},
label: "id",
expr: &ruleRefExpr{
pos: position{line: 325, col: 23, offset: 9331},
name: "DefinitionIdentifier",
},
},
&labeledExpr{
pos: position{line: 325, col: 44, offset: 9352},
label: "lcur",
expr: &ruleRefExpr{
pos: position{line: 325, col: 49, offset: 9357},
name: "LCUR",
},
},
&labeledExpr{
pos: position{line: 325, col: 54, offset: 9362},
label: "fields",
expr: &zeroOrMoreExpr{
pos: position{line: 325, col: 61, offset: 9369},
expr: &ruleRefExpr{
pos: position{line: 325, col: 61, offset: 9369},
name: "FieldWithThrow",
},
},
},
&labeledExpr{
pos: position{line: 325, col: 77, offset: 9385},
label: "rcur",
expr: &ruleRefExpr{
pos: position{line: 325, col: 82, offset: 9390},
name: "RCUR",
},
},
},
},
},
&actionExpr{
pos: position{line: 327, col: 5, offset: 9554},
run: (*parser).callonStruct18,
expr: &labeledExpr{
pos: position{line: 327, col: 5, offset: 9554},
label: "x",
expr: &seqExpr{
pos: position{line: 327, col: 8, offset: 9557},
exprs: []any{
&andExpr{
pos: position{line: 327, col: 8, offset: 9557},
expr: &seqExpr{
pos: position{line: 327, col: 10, offset: 9559},
exprs: []any{
&ruleRefExpr{
pos: position{line: 327, col: 10, offset: 9559},
name: "STRUCT",
},
&zeroOrMoreExpr{
pos: position{line: 327, col: 17, offset: 9566},
expr: &anyMatcher{
line: 327, col: 17, offset: 9566,
},
},
},
},
},
&throwExpr{
pos: position{line: 327, col: 21, offset: 9570},
label: "errStruct",
},
},
},
},
},
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 329, col: 21, offset: 9632},
name: "ErrStructIdentifier",
},
failureLabel: []string{
"errIdentifier",
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 329, col: 53, offset: 9664},
name: "ErrStructRCUR",
},
failureLabel: []string{
"errRCUR",
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 329, col: 81, offset: 9692},
name: "ErrStructField",
},
failureLabel: []string{
"errField",
},
},
},
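// Union: UNION name "{" FieldWithThrow* "}", with the same recovery scheme as Struct.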
{
name: "Union",
pos: position{line: 331, col: 1, offset: 9708},
expr: &recoveryExpr{
pos: position{line: 331, col: 9, offset: 9716},
expr: &recoveryExpr{
pos: position{line: 331, col: 9, offset: 9716},
expr: &recoveryExpr{
pos: position{line: 331, col: 9, offset: 9716},
expr: &choiceExpr{
pos: position{line: 331, col: 9, offset: 9716},
alternatives: []any{
&actionExpr{
pos: position{line: 331, col: 9, offset: 9716},
run: (*parser).callonUnion5,
expr: &seqExpr{
pos: position{line: 331, col: 9, offset: 9716},
exprs: []any{
&labeledExpr{
pos: position{line: 331, col: 9, offset: 9716},
label: "union",
expr: &ruleRefExpr{
pos: position{line: 331, col: 15, offset: 9722},
name: "UNION",
},
},
&labeledExpr{
pos: position{line: 331, col: 21, offset: 9728},
label: "name",
expr: &ruleRefExpr{
pos: position{line: 331, col: 26, offset: 9733},
name: "DefinitionIdentifier",
},
},
&labeledExpr{
pos: position{line: 331, col: 47, offset: 9754},
label: "lcur",
expr: &ruleRefExpr{
pos: position{line: 331, col: 52, offset: 9759},
name: "LCUR",
},
},
&labeledExpr{
pos: position{line: 331, col: 57, offset: 9764},
label: "fields",
expr: &zeroOrMoreExpr{
pos: position{line: 331, col: 64, offset: 9771},
expr: &ruleRefExpr{
pos: position{line: 331, col: 64, offset: 9771},
name: "FieldWithThrow",
},
},
},
&labeledExpr{
pos: position{line: 331, col: 80, offset: 9787},
label: "rcur",
expr: &ruleRefExpr{
pos: position{line: 331, col: 85, offset: 9792},
name: "RCUR",
},
},
},
},
},
&actionExpr{
pos: position{line: 333, col: 5, offset: 9959},
run: (*parser).callonUnion18,
expr: &labeledExpr{
pos: position{line: 333, col: 5, offset: 9959},
label: "x",
expr: &seqExpr{
pos: position{line: 333, col: 8, offset: 9962},
exprs: []any{
&andExpr{
pos: position{line: 333, col: 8, offset: 9962},
expr: &seqExpr{
pos: position{line: 333, col: 10, offset: 9964},
exprs: []any{
&ruleRefExpr{
pos: position{line: 333, col: 10, offset: 9964},
name: "UNION",
},
&zeroOrMoreExpr{
pos: position{line: 333, col: 16, offset: 9970},
expr: &anyMatcher{
line: 333, col: 16, offset: 9970,
},
},
},
},
},
&throwExpr{
pos: position{line: 333, col: 20, offset: 9974},
label: "errUnion",
},
},
},
},
},
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 335, col: 21, offset: 10035},
name: "ErrUnionIdentifier",
},
failureLabel: []string{
"errIdentifier",
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 335, col: 52, offset: 10066},
name: "ErrUnionRCUR",
},
failureLabel: []string{
"errRCUR",
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 335, col: 78, offset: 10092},
name: "ErrUnionField",
},
failureLabel: []string{
"errField",
},
},
},
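// Exception: EXCEPTION name "{" FieldWithThrow* "}", with the same recovery scheme as Struct.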
{
name: "Exception",
pos: position{line: 338, col: 1, offset: 10108},
expr: &recoveryExpr{
pos: position{line: 338, col: 14, offset: 10121},
expr: &recoveryExpr{
pos: position{line: 338, col: 14, offset: 10121},
expr: &recoveryExpr{
pos: position{line: 338, col: 14, offset: 10121},
expr: &choiceExpr{
pos: position{line: 338, col: 14, offset: 10121},
alternatives: []any{
&actionExpr{
pos: position{line: 338, col: 14, offset: 10121},
run: (*parser).callonException5,
expr: &seqExpr{
pos: position{line: 338, col: 14, offset: 10121},
exprs: []any{
&labeledExpr{
pos: position{line: 338, col: 14, offset: 10121},
label: "excep",
expr: &ruleRefExpr{
pos: position{line: 338, col: 20, offset: 10127},
name: "EXCEPTION",
},
},
&labeledExpr{
pos: position{line: 338, col: 30, offset: 10137},
label: "name",
expr: &ruleRefExpr{
pos: position{line: 338, col: 35, offset: 10142},
name: "DefinitionIdentifier",
},
},
&labeledExpr{
pos: position{line: 338, col: 56, offset: 10163},
label: "lcur",
expr: &ruleRefExpr{
pos: position{line: 338, col: 61, offset: 10168},
name: "LCUR",
},
},
&labeledExpr{
pos: position{line: 338, col: 66, offset: 10173},
label: "fields",
expr: &zeroOrMoreExpr{
pos: position{line: 338, col: 73, offset: 10180},
expr: &ruleRefExpr{
pos: position{line: 338, col: 73, offset: 10180},
name: "FieldWithThrow",
},
},
},
&labeledExpr{
pos: position{line: 338, col: 89, offset: 10196},
label: "rcur",
expr: &ruleRefExpr{
pos: position{line: 338, col: 94, offset: 10201},
name: "RCUR",
},
},
},
},
},
&actionExpr{
pos: position{line: 340, col: 5, offset: 10376},
run: (*parser).callonException18,
expr: &labeledExpr{
pos: position{line: 340, col: 5, offset: 10376},
label: "x",
expr: &seqExpr{
pos: position{line: 340, col: 8, offset: 10379},
exprs: []any{
&andExpr{
pos: position{line: 340, col: 8, offset: 10379},
expr: &seqExpr{
pos: position{line: 340, col: 10, offset: 10381},
exprs: []any{
&ruleRefExpr{
pos: position{line: 340, col: 10, offset: 10381},
name: "EXCEPTION",
},
&zeroOrMoreExpr{
pos: position{line: 340, col: 20, offset: 10391},
expr: &anyMatcher{
line: 340, col: 20, offset: 10391,
},
},
},
},
},
&throwExpr{
pos: position{line: 340, col: 24, offset: 10395},
label: "errException",
},
},
},
},
},
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 342, col: 21, offset: 10460},
name: "ErrExceptionIdentifier",
},
failureLabel: []string{
"errIdentifier",
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 342, col: 56, offset: 10495},
name: "ErrExceptionRCUR",
},
failureLabel: []string{
"errRCUR",
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 342, col: 86, offset: 10525},
name: "ErrExceptionField",
},
failureLabel: []string{
"errField",
},
},
},
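// FieldWithThrow: a Field, or (when the next token is neither "}" nor the
// start of a new definition) a thrown errField so recovery can resume.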
{
name: "FieldWithThrow",
pos: position{line: 345, col: 1, offset: 10545},
expr: &choiceExpr{
pos: position{line: 345, col: 18, offset: 10562},
alternatives: []any{
&ruleRefExpr{
pos: position{line: 345, col: 18, offset: 10562},
name: "Field",
},
&actionExpr{
pos: position{line: 345, col: 26, offset: 10570},
run: (*parser).callonFieldWithThrow3,
expr: &labeledExpr{
pos: position{line: 345, col: 26, offset: 10570},
label: "x",
expr: &seqExpr{
pos: position{line: 345, col: 30, offset: 10574},
exprs: []any{
&ruleRefExpr{
pos: position{line: 345, col: 30, offset: 10574},
name: "ReservedComments",
},
&notExpr{
pos: position{line: 345, col: 47, offset: 10591},
expr: &choiceExpr{
pos: position{line: 345, col: 49, offset: 10593},
alternatives: []any{
&seqExpr{
pos: position{line: 345, col: 51, offset: 10595},
exprs: []any{
&litMatcher{
pos: position{line: 345, col: 51, offset: 10595},
val: "}",
ignoreCase: false,
want: "\"}\"",
},
&zeroOrMoreExpr{
pos: position{line: 345, col: 55, offset: 10599},
expr: &ruleRefExpr{
pos: position{line: 345, col: 55, offset: 10599},
name: "Indent",
},
},
},
},
&ruleRefExpr{
pos: position{line: 345, col: 66, offset: 10610},
name: "DefinitionStart",
},
},
},
},
&throwExpr{
pos: position{line: 345, col: 84, offset: 10628},
label: "errField",
},
},
},
},
},
},
},
},
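// Field: comments, FieldId, optional requiredness, FieldType, Identifier,
// optional "= ConstValue", optional Annotations, optional ListSeparator,
// then end-of-line comments.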
{
name: "Field",
pos: position{line: 349, col: 1, offset: 10673},
expr: &actionExpr{
pos: position{line: 349, col: 9, offset: 10681},
run: (*parser).callonField1,
expr: &seqExpr{
pos: position{line: 349, col: 9, offset: 10681},
exprs: []any{
&labeledExpr{
pos: position{line: 349, col: 9, offset: 10681},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 349, col: 18, offset: 10690},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 349, col: 35, offset: 10707},
label: "index",
expr: &ruleRefExpr{
pos: position{line: 349, col: 41, offset: 10713},
name: "FieldId",
},
},
&labeledExpr{
pos: position{line: 349, col: 49, offset: 10721},
label: "required",
expr: &zeroOrOneExpr{
pos: position{line: 349, col: 58, offset: 10730},
expr: &ruleRefExpr{
pos: position{line: 349, col: 58, offset: 10730},
name: "FieldReq",
},
},
},
&labeledExpr{
pos: position{line: 349, col: 68, offset: 10740},
label: "fieldType",
expr: &ruleRefExpr{
pos: position{line: 349, col: 78, offset: 10750},
name: "FieldType",
},
},
&labeledExpr{
pos: position{line: 349, col: 88, offset: 10760},
label: "id",
expr: &ruleRefExpr{
pos: position{line: 349, col: 91, offset: 10763},
name: "Identifier",
},
},
&labeledExpr{
pos: position{line: 349, col: 102, offset: 10774},
label: "value",
expr: &zeroOrOneExpr{
pos: position{line: 349, col: 108, offset: 10780},
expr: &seqExpr{
pos: position{line: 349, col: 109, offset: 10781},
exprs: []any{
&ruleRefExpr{
pos: position{line: 349, col: 109, offset: 10781},
name: "EQUAL",
},
&ruleRefExpr{
pos: position{line: 349, col: 115, offset: 10787},
name: "ConstValue",
},
},
},
},
},
&labeledExpr{
pos: position{line: 349, col: 128, offset: 10800},
label: "annos",
expr: &zeroOrOneExpr{
pos: position{line: 349, col: 134, offset: 10806},
expr: &ruleRefExpr{
pos: position{line: 349, col: 134, offset: 10806},
name: "Annotations",
},
},
},
&labeledExpr{
pos: position{line: 349, col: 147, offset: 10819},
label: "sep",
expr: &zeroOrOneExpr{
pos: position{line: 349, col: 151, offset: 10823},
expr: &ruleRefExpr{
pos: position{line: 349, col: 151, offset: 10823},
name: "ListSeparator",
},
},
},
&labeledExpr{
pos: position{line: 349, col: 166, offset: 10838},
label: "lineComments",
expr: &ruleRefExpr{
pos: position{line: 349, col: 179, offset: 10851},
name: "ReservedEndLineComments",
},
},
},
},
},
},
{
name: "FieldId",
pos: position{line: 365, col: 1, offset: 11416},
expr: &recoveryExpr{
pos: position{line: 365, col: 11, offset: 11426},
expr: &actionExpr{
pos: position{line: 365, col: 11, offset: 11426},
run: (*parser).callonFieldId2,
expr: &seqExpr{
pos: position{line: 365, col: 11, offset: 11426},
exprs: []any{
&labeledExpr{
pos: position{line: 365, col: 11, offset: 11426},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 365, col: 20, offset: 11435},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 365, col: 37, offset: 11452},
label: "i",
expr: &ruleRefExpr{
pos: position{line: 365, col: 39, offset: 11454},
name: "FieldIndex",
},
},
&labeledExpr{
pos: position{line: 365, col: 50, offset: 11465},
label: "colon",
expr: &ruleRefExpr{
pos: position{line: 365, col: 56, offset: 11471},
name: "COLON",
},
},
&zeroOrMoreExpr{
pos: position{line: 365, col: 62, offset: 11477},
expr: &ruleRefExpr{
pos: position{line: 365, col: 62, offset: 11477},
name: "Indent",
},
},
},
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 370, col: 21, offset: 11653},
name: "ErrFieldIndex",
},
failureLabel: []string{
"errFieldIndex",
},
},
},
{
name: "FieldReq",
pos: position{line: 372, col: 1, offset: 11668},
expr: &actionExpr{
pos: position{line: 372, col: 12, offset: 11679},
run: (*parser).callonFieldReq1,
expr: &seqExpr{
pos: position{line: 372, col: 12, offset: 11679},
exprs: []any{
&labeledExpr{
pos: position{line: 372, col: 12, offset: 11679},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 372, col: 21, offset: 11688},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 372, col: 38, offset: 11705},
label: "r",
expr: &ruleRefExpr{
pos: position{line: 372, col: 40, offset: 11707},
name: "IsRequired",
},
},
&zeroOrMoreExpr{
pos: position{line: 372, col: 51, offset: 11718},
expr: &ruleRefExpr{
pos: position{line: 372, col: 51, offset: 11718},
name: "Indent",
},
},
},
},
},
},
{
name: "IsRequired",
pos: position{line: 377, col: 1, offset: 11863},
expr: &actionExpr{
pos: position{line: 377, col: 14, offset: 11876},
run: (*parser).callonIsRequired1,
expr: &labeledExpr{
pos: position{line: 377, col: 14, offset: 11876},
label: "v",
expr: &choiceExpr{
pos: position{line: 377, col: 17, offset: 11879},
alternatives: []any{
&ruleRefExpr{
pos: position{line: 377, col: 17, offset: 11879},
name: "RequiredToken",
},
&ruleRefExpr{
pos: position{line: 377, col: 33, offset: 11895},
name: "OptionalToken",
},
},
},
},
},
},
{
name: "RequiredToken",
pos: position{line: 381, col: 1, offset: 11930},
expr: &actionExpr{
pos: position{line: 381, col: 17, offset: 11946},
run: (*parser).callonRequiredToken1,
expr: &litMatcher{
pos: position{line: 381, col: 17, offset: 11946},
val: "required",
ignoreCase: false,
want: "\"required\"",
},
},
},
{
name: "OptionalToken",
pos: position{line: 385, col: 1, offset: 11996},
expr: &actionExpr{
pos: position{line: 385, col: 17, offset: 12012},
run: (*parser).callonOptionalToken1,
expr: &litMatcher{
pos: position{line: 385, col: 17, offset: 12012},
val: "optional",
ignoreCase: false,
want: "\"optional\"",
},
},
},
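// Function: comments, optional ONEWAY, FunctionType, name, "(" args ")",
// optional Throws, Annotations and ListSeparator, then end-of-line comments;
// recovers via ErrFunctionIdentifier and ErrFunctionArgument.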
{
name: "Function",
pos: position{line: 389, col: 1, offset: 12062},
expr: &recoveryExpr{
pos: position{line: 389, col: 12, offset: 12073},
expr: &recoveryExpr{
pos: position{line: 389, col: 12, offset: 12073},
expr: &choiceExpr{
pos: position{line: 389, col: 12, offset: 12073},
alternatives: []any{
&actionExpr{
pos: position{line: 389, col: 12, offset: 12073},
run: (*parser).callonFunction4,
expr: &seqExpr{
pos: position{line: 389, col: 12, offset: 12073},
exprs: []any{
&labeledExpr{
pos: position{line: 389, col: 12, offset: 12073},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 389, col: 21, offset: 12082},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 389, col: 38, offset: 12099},
label: "oneway",
expr: &zeroOrOneExpr{
pos: position{line: 389, col: 45, offset: 12106},
expr: &ruleRefExpr{
pos: position{line: 389, col: 45, offset: 12106},
name: "ONEWAY",
},
},
},
&labeledExpr{
pos: position{line: 389, col: 53, offset: 12114},
label: "ft",
expr: &ruleRefExpr{
pos: position{line: 389, col: 56, offset: 12117},
name: "FunctionType",
},
},
&labeledExpr{
pos: position{line: 389, col: 69, offset: 12130},
label: "name",
expr: &ruleRefExpr{
pos: position{line: 389, col: 74, offset: 12135},
name: "DefinitionIdentifier",
},
},
&labeledExpr{
pos: position{line: 389, col: 95, offset: 12156},
label: "lpar",
expr: &ruleRefExpr{
pos: position{line: 389, col: 100, offset: 12161},
name: "LPAR",
},
},
&labeledExpr{
pos: position{line: 389, col: 105, offset: 12166},
label: "args",
expr: &zeroOrMoreExpr{
pos: position{line: 389, col: 110, offset: 12171},
expr: &ruleRefExpr{
pos: position{line: 389, col: 110, offset: 12171},
name: "FunctionFieldWithThrow",
},
},
},
&labeledExpr{
pos: position{line: 389, col: 134, offset: 12195},
label: "rpar",
expr: &ruleRefExpr{
pos: position{line: 389, col: 139, offset: 12200},
name: "RPAR",
},
},
&labeledExpr{
pos: position{line: 389, col: 144, offset: 12205},
label: "throws",
expr: &zeroOrOneExpr{
pos: position{line: 389, col: 151, offset: 12212},
expr: &ruleRefExpr{
pos: position{line: 389, col: 151, offset: 12212},
name: "Throws",
},
},
},
&labeledExpr{
pos: position{line: 389, col: 159, offset: 12220},
label: "annos",
expr: &zeroOrOneExpr{
pos: position{line: 389, col: 165, offset: 12226},
expr: &ruleRefExpr{
pos: position{line: 389, col: 165, offset: 12226},
name: "Annotations",
},
},
},
&labeledExpr{
pos: position{line: 389, col: 178, offset: 12239},
label: "sep",
expr: &zeroOrOneExpr{
pos: position{line: 389, col: 182, offset: 12243},
expr: &ruleRefExpr{
pos: position{line: 389, col: 182, offset: 12243},
name: "ListSeparator",
},
},
},
&labeledExpr{
pos: position{line: 389, col: 197, offset: 12258},
label: "endLineComments",
expr: &ruleRefExpr{
pos: position{line: 389, col: 213, offset: 12274},
name: "ReservedEndLineComments",
},
},
},
},
},
&actionExpr{
pos: position{line: 409, col: 5, offset: 12924},
run: (*parser).callonFunction33,
expr: &labeledExpr{
pos: position{line: 409, col: 5, offset: 12924},
label: "x",
expr: &seqExpr{
pos: position{line: 409, col: 8, offset: 12927},
exprs: []any{
&ruleRefExpr{
pos: position{line: 409, col: 8, offset: 12927},
name: "ReservedComments",
},
&andExpr{
pos: position{line: 409, col: 25, offset: 12944},
expr: &seqExpr{
pos: position{line: 409, col: 27, offset: 12946},
exprs: []any{
&labeledExpr{
pos: position{line: 409, col: 27, offset: 12946},
label: "oneway",
expr: &zeroOrOneExpr{
pos: position{line: 409, col: 34, offset: 12953},
expr: &ruleRefExpr{
pos: position{line: 409, col: 34, offset: 12953},
name: "ONEWAY",
},
},
},
&labeledExpr{
pos: position{line: 409, col: 42, offset: 12961},
label: "ft",
expr: &ruleRefExpr{
pos: position{line: 409, col: 45, offset: 12964},
name: "FunctionType",
},
},
},
},
},
&throwExpr{
pos: position{line: 409, col: 59, offset: 12978},
label: "errFunction",
},
},
},
},
},
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 411, col: 21, offset: 13042},
name: "ErrFunctionIdentifier",
},
failureLabel: []string{
"errIdentifier",
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 411, col: 56, offset: 13077},
name: "ErrFunctionArgument",
},
failureLabel: []string{
"errField",
},
},
},
{
name: "FunctionFieldWithThrow",
pos: position{line: 413, col: 1, offset: 13098},
expr: &choiceExpr{
pos: position{line: 413, col: 26, offset: 13123},
alternatives: []any{
&actionExpr{
pos: position{line: 413, col: 26, offset: 13123},
run: (*parser).callonFunctionFieldWithThrow2,
expr: &labeledExpr{
pos: position{line: 413, col: 26, offset: 13123},
label: "v",
expr: &ruleRefExpr{
pos: position{line: 413, col: 28, offset: 13125},
name: "Field",
},
},
},
&actionExpr{
pos: position{line: 415, col: 6, offset: 13153},
run: (*parser).callonFunctionFieldWithThrow5,
expr: &labeledExpr{
pos: position{line: 415, col: 6, offset: 13153},
label: "x",
expr: &seqExpr{
pos: position{line: 415, col: 9, offset: 13156},
exprs: []any{
&labeledExpr{
pos: position{line: 415, col: 9, offset: 13156},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 415, col: 18, offset: 13165},
name: "ReservedComments",
},
},
&andExpr{
pos: position{line: 415, col: 35, offset: 13182},
expr: &seqExpr{
pos: position{line: 415, col: 37, offset: 13184},
exprs: []any{
&labeledExpr{
pos: position{line: 415, col: 37, offset: 13184},
label: "index",
expr: &ruleRefExpr{
pos: position{line: 415, col: 43, offset: 13190},
name: "FieldId",
},
},
&labeledExpr{
pos: position{line: 415, col: 51, offset: 13198},
label: "required",
expr: &zeroOrOneExpr{
pos: position{line: 415, col: 60, offset: 13207},
expr: &ruleRefExpr{
pos: position{line: 415, col: 60, offset: 13207},
name: "FieldReq",
},
},
},
&labeledExpr{
pos: position{line: 415, col: 70, offset: 13217},
label: "fieldType",
expr: &ruleRefExpr{
pos: position{line: 415, col: 80, offset: 13227},
name: "FieldType",
},
},
},
},
},
&throwExpr{
pos: position{line: 415, col: 91, offset: 13238},
label: "errField",
},
},
},
},
},
},
},
},
{
name: "FunctionType",
pos: position{line: 420, col: 1, offset: 13284},
expr: &choiceExpr{
pos: position{line: 420, col: 18, offset: 13301},
alternatives: []any{
&ruleRefExpr{
pos: position{line: 420, col: 18, offset: 13301},
name: "VOID",
},
&ruleRefExpr{
pos: position{line: 420, col: 25, offset: 13308},
name: "FieldType",
},
},
},
},
{
name: "Throws",
pos: position{line: 422, col: 1, offset: 13319},
expr: &actionExpr{
pos: position{line: 422, col: 11, offset: 13329},
run: (*parser).callonThrows1,
expr: &seqExpr{
pos: position{line: 422, col: 11, offset: 13329},
exprs: []any{
&labeledExpr{
pos: position{line: 422, col: 11, offset: 13329},
label: "throws",
expr: &ruleRefExpr{
pos: position{line: 422, col: 18, offset: 13336},
name: "THROWS",
},
},
&labeledExpr{
pos: position{line: 422, col: 25, offset: 13343},
label: "lpar",
expr: &ruleRefExpr{
pos: position{line: 422, col: 30, offset: 13348},
name: "LPAR",
},
},
&labeledExpr{
pos: position{line: 422, col: 35, offset: 13353},
label: "fields",
expr: &zeroOrMoreExpr{
pos: position{line: 422, col: 42, offset: 13360},
expr: &ruleRefExpr{
pos: position{line: 422, col: 42, offset: 13360},
name: "Field",
},
},
},
&labeledExpr{
pos: position{line: 422, col: 49, offset: 13367},
label: "rpar",
expr: &ruleRefExpr{
pos: position{line: 422, col: 54, offset: 13372},
name: "RPAR",
},
},
},
},
},
},
{
name: "FieldType",
pos: position{line: 426, col: 1, offset: 13521},
expr: &actionExpr{
pos: position{line: 426, col: 13, offset: 13533},
run: (*parser).callonFieldType1,
expr: &seqExpr{
pos: position{line: 426, col: 13, offset: 13533},
exprs: []any{
&labeledExpr{
pos: position{line: 426, col: 13, offset: 13533},
label: "v",
expr: &choiceExpr{
pos: position{line: 426, col: 16, offset: 13536},
alternatives: []any{
&ruleRefExpr{
pos: position{line: 426, col: 16, offset: 13536},
name: "ContainerType",
},
&ruleRefExpr{
pos: position{line: 426, col: 32, offset: 13552},
name: "BaseType",
},
&ruleRefExpr{
pos: position{line: 426, col: 43, offset: 13563},
name: "IdentifierType",
},
},
},
},
&labeledExpr{
pos: position{line: 426, col: 59, offset: 13579},
label: "annos",
expr: &zeroOrOneExpr{
pos: position{line: 426, col: 65, offset: 13585},
expr: &ruleRefExpr{
pos: position{line: 426, col: 65, offset: 13585},
name: "Annotations",
},
},
},
},
},
},
},
{
name: "IdentifierType",
pos: position{line: 433, col: 1, offset: 13681},
expr: &actionExpr{
pos: position{line: 433, col: 18, offset: 13698},
run: (*parser).callonIdentifierType1,
expr: &labeledExpr{
pos: position{line: 433, col: 18, offset: 13698},
label: "v",
expr: &ruleRefExpr{
pos: position{line: 433, col: 20, offset: 13700},
name: "Identifier",
},
},
},
},
{
name: "BaseType",
pos: position{line: 437, col: 1, offset: 13759},
expr: &actionExpr{
pos: position{line: 437, col: 12, offset: 13770},
run: (*parser).callonBaseType1,
expr: &labeledExpr{
pos: position{line: 437, col: 12, offset: 13770},
label: "v",
expr: &choiceExpr{
pos: position{line: 437, col: 15, offset: 13773},
alternatives: []any{
&ruleRefExpr{
pos: position{line: 437, col: 15, offset: 13773},
name: "BOOL",
},
&ruleRefExpr{
pos: position{line: 437, col: 22, offset: 13780},
name: "BYTE",
},
&ruleRefExpr{
pos: position{line: 437, col: 29, offset: 13787},
name: "I8",
},
&ruleRefExpr{
pos: position{line: 437, col: 34, offset: 13792},
name: "I16",
},
&ruleRefExpr{
pos: position{line: 437, col: 40, offset: 13798},
name: "I32",
},
&ruleRefExpr{
pos: position{line: 437, col: 46, offset: 13804},
name: "I64",
},
&ruleRefExpr{
pos: position{line: 437, col: 52, offset: 13810},
name: "DOUBLE",
},
&ruleRefExpr{
pos: position{line: 437, col: 61, offset: 13819},
name: "STRING",
},
&ruleRefExpr{
pos: position{line: 437, col: 70, offset: 13828},
name: "BINARY",
},
&ruleRefExpr{
pos: position{line: 437, col: 79, offset: 13837},
name: "UUID",
},
},
},
},
},
},
{
name: "ContainerType",
pos: position{line: 441, col: 1, offset: 13946},
expr: &actionExpr{
pos: position{line: 441, col: 17, offset: 13962},
run: (*parser).callonContainerType1,
expr: &labeledExpr{
pos: position{line: 441, col: 17, offset: 13962},
label: "v",
expr: &choiceExpr{
pos: position{line: 441, col: 20, offset: 13965},
alternatives: []any{
&ruleRefExpr{
pos: position{line: 441, col: 20, offset: 13965},
name: "MapType",
},
&ruleRefExpr{
pos: position{line: 441, col: 30, offset: 13975},
name: "SetType",
},
&ruleRefExpr{
pos: position{line: 441, col: 40, offset: 13985},
name: "ListType",
},
},
},
},
},
},
{
name: "MapType",
pos: position{line: 445, col: 1, offset: 14028},
expr: &actionExpr{
pos: position{line: 445, col: 12, offset: 14039},
run: (*parser).callonMapType1,
expr: &seqExpr{
pos: position{line: 445, col: 12, offset: 14039},
exprs: []any{
&labeledExpr{
pos: position{line: 445, col: 12, offset: 14039},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 445, col: 14, offset: 14041},
name: "MAP",
},
},
&labeledExpr{
pos: position{line: 445, col: 18, offset: 14045},
label: "cpp",
expr: &zeroOrOneExpr{
pos: position{line: 445, col: 22, offset: 14049},
expr: &ruleRefExpr{
pos: position{line: 445, col: 22, offset: 14049},
name: "CppType",
},
},
},
&labeledExpr{
pos: position{line: 445, col: 31, offset: 14058},
label: "lp",
expr: &ruleRefExpr{
pos: position{line: 445, col: 34, offset: 14061},
name: "LPOINT",
},
},
&labeledExpr{
pos: position{line: 445, col: 41, offset: 14068},
label: "key",
expr: &ruleRefExpr{
pos: position{line: 445, col: 45, offset: 14072},
name: "FieldType",
},
},
&labeledExpr{
pos: position{line: 445, col: 55, offset: 14082},
label: "comma",
expr: &ruleRefExpr{
pos: position{line: 445, col: 61, offset: 14088},
name: "COMMA",
},
},
&labeledExpr{
pos: position{line: 445, col: 67, offset: 14094},
label: "value",
expr: &ruleRefExpr{
pos: position{line: 445, col: 73, offset: 14100},
name: "FieldType",
},
},
&labeledExpr{
pos: position{line: 445, col: 83, offset: 14110},
label: "rp",
expr: &ruleRefExpr{
pos: position{line: 445, col: 86, offset: 14113},
name: "RPOINT",
},
},
},
},
},
},
{
name: "SetType",
pos: position{line: 455, col: 1, offset: 14377},
expr: &actionExpr{
pos: position{line: 455, col: 11, offset: 14387},
run: (*parser).callonSetType1,
expr: &seqExpr{
pos: position{line: 455, col: 11, offset: 14387},
exprs: []any{
&labeledExpr{
pos: position{line: 455, col: 11, offset: 14387},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 455, col: 13, offset: 14389},
name: "SET",
},
},
&labeledExpr{
pos: position{line: 455, col: 17, offset: 14393},
label: "cpp",
expr: &zeroOrOneExpr{
pos: position{line: 455, col: 21, offset: 14397},
expr: &ruleRefExpr{
pos: position{line: 455, col: 21, offset: 14397},
name: "CppType",
},
},
},
&labeledExpr{
pos: position{line: 455, col: 30, offset: 14406},
label: "lp",
expr: &ruleRefExpr{
pos: position{line: 455, col: 33, offset: 14409},
name: "LPOINT",
},
},
&labeledExpr{
pos: position{line: 455, col: 40, offset: 14416},
label: "key",
expr: &ruleRefExpr{
pos: position{line: 455, col: 44, offset: 14420},
name: "FieldType",
},
},
&labeledExpr{
pos: position{line: 455, col: 54, offset: 14430},
label: "rp",
expr: &ruleRefExpr{
pos: position{line: 455, col: 57, offset: 14433},
name: "RPOINT",
},
},
},
},
},
},
{
name: "ListType",
pos: position{line: 464, col: 1, offset: 14662},
expr: &actionExpr{
pos: position{line: 464, col: 12, offset: 14673},
run: (*parser).callonListType1,
expr: &seqExpr{
pos: position{line: 464, col: 12, offset: 14673},
exprs: []any{
&labeledExpr{
pos: position{line: 464, col: 12, offset: 14673},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 464, col: 14, offset: 14675},
name: "LIST",
},
},
&labeledExpr{
pos: position{line: 464, col: 19, offset: 14680},
label: "lp",
expr: &ruleRefExpr{
pos: position{line: 464, col: 22, offset: 14683},
name: "LPOINT",
},
},
&labeledExpr{
pos: position{line: 464, col: 29, offset: 14690},
label: "key",
expr: &ruleRefExpr{
pos: position{line: 464, col: 33, offset: 14694},
name: "FieldType",
},
},
&labeledExpr{
pos: position{line: 464, col: 43, offset: 14704},
label: "rp",
expr: &ruleRefExpr{
pos: position{line: 464, col: 46, offset: 14707},
name: "RPOINT",
},
},
&labeledExpr{
pos: position{line: 464, col: 53, offset: 14714},
label: "cpp",
expr: &zeroOrOneExpr{
pos: position{line: 464, col: 57, offset: 14718},
expr: &ruleRefExpr{
pos: position{line: 464, col: 57, offset: 14718},
name: "CppType",
},
},
},
},
},
},
},
{
name: "CppType",
pos: position{line: 473, col: 1, offset: 14949},
expr: &actionExpr{
pos: position{line: 473, col: 11, offset: 14959},
run: (*parser).callonCppType1,
expr: &seqExpr{
pos: position{line: 473, col: 11, offset: 14959},
exprs: []any{
&labeledExpr{
pos: position{line: 473, col: 11, offset: 14959},
label: "cpp",
expr: &ruleRefExpr{
pos: position{line: 473, col: 15, offset: 14963},
name: "CPPTYPE",
},
},
&labeledExpr{
pos: position{line: 473, col: 23, offset: 14971},
label: "l",
expr: &ruleRefExpr{
pos: position{line: 473, col: 25, offset: 14973},
name: "Literal",
},
},
},
},
},
},
{
name: "ConstValue",
pos: position{line: 477, col: 1, offset: 15074},
expr: &actionExpr{
pos: position{line: 477, col: 14, offset: 15087},
run: (*parser).callonConstValue1,
expr: &labeledExpr{
pos: position{line: 477, col: 14, offset: 15087},
label: "v",
expr: &choiceExpr{
pos: position{line: 477, col: 17, offset: 15090},
alternatives: []any{
&ruleRefExpr{
pos: position{line: 477, col: 17, offset: 15090},
name: "DoubleConstant",
},
&ruleRefExpr{
pos: position{line: 477, col: 34, offset: 15107},
name: "IntConstant",
},
&ruleRefExpr{
pos: position{line: 477, col: 48, offset: 15121},
name: "Literal",
},
&ruleRefExpr{
pos: position{line: 477, col: 58, offset: 15131},
name: "IdentifierConst",
},
&ruleRefExpr{
pos: position{line: 477, col: 76, offset: 15149},
name: "ConstMap",
},
&ruleRefExpr{
pos: position{line: 477, col: 87, offset: 15160},
name: "ConstList",
},
},
},
},
},
},
{
name: "IdentifierConst",
pos: position{line: 484, col: 1, offset: 15321},
expr: &actionExpr{
pos: position{line: 484, col: 19, offset: 15339},
run: (*parser).callonIdentifierConst1,
expr: &seqExpr{
pos: position{line: 484, col: 19, offset: 15339},
exprs: []any{
&labeledExpr{
pos: position{line: 484, col: 19, offset: 15339},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 484, col: 28, offset: 15348},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 484, col: 45, offset: 15365},
label: "cv",
expr: &ruleRefExpr{
pos: position{line: 484, col: 48, offset: 15368},
name: "IdentifierConstValue",
},
},
},
},
},
},
{
name: "IdentifierConstValue",
pos: position{line: 490, col: 1, offset: 15480},
expr: &actionExpr{
pos: position{line: 490, col: 24, offset: 15503},
run: (*parser).callonIdentifierConstValue1,
expr: &labeledExpr{
pos: position{line: 490, col: 24, offset: 15503},
label: "id",
expr: &ruleRefExpr{
pos: position{line: 490, col: 27, offset: 15506},
name: "Identifier",
},
},
},
},
{
name: "EnumValueIntConstant",
pos: position{line: 495, col: 1, offset: 15628},
expr: &choiceExpr{
pos: position{line: 495, col: 24, offset: 15651},
alternatives: []any{
&actionExpr{
pos: position{line: 495, col: 24, offset: 15651},
run: (*parser).callonEnumValueIntConstant2,
expr: &labeledExpr{
pos: position{line: 495, col: 24, offset: 15651},
label: "v",
expr: &seqExpr{
pos: position{line: 495, col: 27, offset: 15654},
exprs: []any{
&ruleRefExpr{
pos: position{line: 495, col: 27, offset: 15654},
name: "EQUAL",
},
&ruleRefExpr{
pos: position{line: 495, col: 33, offset: 15660},
name: "IntConstant",
},
},
},
},
},
&actionExpr{
pos: position{line: 497, col: 5, offset: 15694},
run: (*parser).callonEnumValueIntConstant7,
expr: &labeledExpr{
pos: position{line: 497, col: 5, offset: 15694},
label: "x",
expr: &seqExpr{
pos: position{line: 497, col: 8, offset: 15697},
exprs: []any{
&ruleRefExpr{
pos: position{line: 497, col: 8, offset: 15697},
name: "EQUAL",
},
&ruleRefExpr{
pos: position{line: 497, col: 14, offset: 15703},
name: "ReservedComments",
},
&throwExpr{
pos: position{line: 497, col: 31, offset: 15720},
label: "errIntConstant",
},
&zeroOrMoreExpr{
pos: position{line: 497, col: 49, offset: 15738},
expr: &ruleRefExpr{
pos: position{line: 497, col: 49, offset: 15738},
name: "Indent",
},
},
},
},
},
},
},
},
},
{
name: "IntConstant",
pos: position{line: 501, col: 1, offset: 15799},
expr: &choiceExpr{
pos: position{line: 501, col: 15, offset: 15813},
alternatives: []any{
&actionExpr{
pos: position{line: 501, col: 15, offset: 15813},
run: (*parser).callonIntConstant2,
expr: &seqExpr{
pos: position{line: 501, col: 15, offset: 15813},
exprs: []any{
&labeledExpr{
pos: position{line: 501, col: 15, offset: 15813},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 501, col: 24, offset: 15822},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 501, col: 42, offset: 15840},
label: "v",
expr: &choiceExpr{
pos: position{line: 501, col: 45, offset: 15843},
alternatives: []any{
&ruleRefExpr{
pos: position{line: 501, col: 45, offset: 15843},
name: "HexIntConstant",
},
&ruleRefExpr{
pos: position{line: 501, col: 62, offset: 15860},
name: "OctIntConstant",
},
&ruleRefExpr{
pos: position{line: 501, col: 79, offset: 15877},
name: "NormalIntConstant",
},
},
},
},
&notExpr{
pos: position{line: 501, col: 98, offset: 15896},
expr: &charClassMatcher{
pos: position{line: 501, col: 99, offset: 15897},
val: "[a-zA-Z]",
ranges: []rune{'a', 'z', 'A', 'Z'},
ignoreCase: false,
inverted: false,
},
},
&zeroOrMoreExpr{
pos: position{line: 501, col: 109, offset: 15907},
expr: &ruleRefExpr{
pos: position{line: 501, col: 109, offset: 15907},
name: "Indent",
},
},
},
},
},
&actionExpr{
pos: position{line: 506, col: 5, offset: 16000},
run: (*parser).callonIntConstant15,
expr: &labeledExpr{
pos: position{line: 506, col: 5, offset: 16000},
label: "x",
expr: &seqExpr{
pos: position{line: 506, col: 8, offset: 16003},
exprs: []any{
&ruleRefExpr{
pos: position{line: 506, col: 8, offset: 16003},
name: "ReservedComments",
},
&andExpr{
pos: position{line: 506, col: 25, offset: 16020},
expr: &choiceExpr{
pos: position{line: 506, col: 27, offset: 16022},
alternatives: []any{
&litMatcher{
pos: position{line: 506, col: 27, offset: 16022},
val: "0x",
ignoreCase: false,
want: "\"0x\"",
},
&litMatcher{
pos: position{line: 506, col: 34, offset: 16029},
val: "0o",
ignoreCase: false,
want: "\"0o\"",
},
&seqExpr{
pos: position{line: 506, col: 42, offset: 16037},
exprs: []any{
&zeroOrOneExpr{
pos: position{line: 506, col: 42, offset: 16037},
expr: &choiceExpr{
pos: position{line: 506, col: 43, offset: 16038},
alternatives: []any{
&litMatcher{
pos: position{line: 506, col: 43, offset: 16038},
val: "+",
ignoreCase: false,
want: "\"+\"",
},
&litMatcher{
pos: position{line: 506, col: 49, offset: 16044},
val: "-",
ignoreCase: false,
want: "\"-\"",
},
},
},
},
&ruleRefExpr{
pos: position{line: 506, col: 55, offset: 16050},
name: "Digit",
},
},
},
},
},
},
&throwExpr{
pos: position{line: 506, col: 63, offset: 16058},
label: "errIntConstant",
},
},
},
},
},
},
},
},
{
name: "HexIntConstant",
pos: position{line: 510, col: 1, offset: 16108},
expr: &actionExpr{
pos: position{line: 510, col: 18, offset: 16125},
run: (*parser).callonHexIntConstant1,
expr: &seqExpr{
pos: position{line: 510, col: 18, offset: 16125},
exprs: []any{
&litMatcher{
pos: position{line: 510, col: 18, offset: 16125},
val: "0x",
ignoreCase: false,
want: "\"0x\"",
},
&oneOrMoreExpr{
pos: position{line: 510, col: 23, offset: 16130},
expr: &choiceExpr{
pos: position{line: 510, col: 24, offset: 16131},
alternatives: []any{
&charClassMatcher{
pos: position{line: 510, col: 24, offset: 16131},
val: "[0-9]",
ranges: []rune{'0', '9'},
ignoreCase: false,
inverted: false,
},
&charClassMatcher{
pos: position{line: 510, col: 32, offset: 16139},
val: "[A-Z]",
ranges: []rune{'A', 'Z'},
ignoreCase: false,
inverted: false,
},
&charClassMatcher{
pos: position{line: 510, col: 40, offset: 16147},
val: "[a-z]",
ranges: []rune{'a', 'z'},
ignoreCase: false,
inverted: false,
},
},
},
},
},
},
},
},
{
name: "OctIntConstant",
pos: position{line: 522, col: 1, offset: 16392},
expr: &actionExpr{
pos: position{line: 522, col: 18, offset: 16409},
run: (*parser).callonOctIntConstant1,
expr: &seqExpr{
pos: position{line: 522, col: 18, offset: 16409},
exprs: []any{
&litMatcher{
pos: position{line: 522, col: 18, offset: 16409},
val: "0o",
ignoreCase: false,
want: "\"0o\"",
},
&oneOrMoreExpr{
pos: position{line: 522, col: 23, offset: 16414},
expr: &ruleRefExpr{
pos: position{line: 522, col: 23, offset: 16414},
name: "Digit",
},
},
},
},
},
},
{
name: "NormalIntConstant",
pos: position{line: 533, col: 1, offset: 16649},
expr: &actionExpr{
pos: position{line: 533, col: 21, offset: 16669},
run: (*parser).callonNormalIntConstant1,
expr: &seqExpr{
pos: position{line: 533, col: 21, offset: 16669},
exprs: []any{
&zeroOrOneExpr{
pos: position{line: 533, col: 21, offset: 16669},
expr: &choiceExpr{
pos: position{line: 533, col: 22, offset: 16670},
alternatives: []any{
&litMatcher{
pos: position{line: 533, col: 22, offset: 16670},
val: "+",
ignoreCase: false,
want: "\"+\"",
},
&litMatcher{
pos: position{line: 533, col: 28, offset: 16676},
val: "-",
ignoreCase: false,
want: "\"-\"",
},
},
},
},
&oneOrMoreExpr{
pos: position{line: 533, col: 34, offset: 16682},
expr: &ruleRefExpr{
pos: position{line: 533, col: 34, offset: 16682},
name: "Digit",
},
},
},
},
},
},
{
name: "FieldIndex",
pos: position{line: 544, col: 1, offset: 16892},
expr: &choiceExpr{
pos: position{line: 544, col: 14, offset: 16905},
alternatives: []any{
&actionExpr{
pos: position{line: 544, col: 14, offset: 16905},
run: (*parser).callonFieldIndex2,
expr: &oneOrMoreExpr{
pos: position{line: 544, col: 14, offset: 16905},
expr: &ruleRefExpr{
pos: position{line: 544, col: 14, offset: 16905},
name: "Digit",
},
},
},
&actionExpr{
pos: position{line: 550, col: 5, offset: 17080},
run: (*parser).callonFieldIndex5,
expr: &labeledExpr{
pos: position{line: 550, col: 5, offset: 17080},
label: "x",
expr: &seqExpr{
pos: position{line: 550, col: 8, offset: 17083},
exprs: []any{
&ruleRefExpr{
pos: position{line: 550, col: 8, offset: 17083},
name: "ReservedComments",
},
&andExpr{
pos: position{line: 550, col: 25, offset: 17100},
expr: &seqExpr{
pos: position{line: 550, col: 27, offset: 17102},
exprs: []any{
&oneOrMoreExpr{
pos: position{line: 550, col: 27, offset: 17102},
expr: &charClassMatcher{
pos: position{line: 550, col: 27, offset: 17102},
val: "[a-zA-Z]",
ranges: []rune{'a', 'z', 'A', 'Z'},
ignoreCase: false,
inverted: false,
},
},
&ruleRefExpr{
pos: position{line: 550, col: 37, offset: 17112},
name: "COLON",
},
},
},
},
&throwExpr{
pos: position{line: 550, col: 44, offset: 17119},
label: "errFieldIndex",
},
},
},
},
},
},
},
},
{
name: "DoubleConstant",
pos: position{line: 554, col: 1, offset: 17168},
expr: &actionExpr{
pos: position{line: 554, col: 19, offset: 17186},
run: (*parser).callonDoubleConstant1,
expr: &seqExpr{
pos: position{line: 554, col: 19, offset: 17186},
exprs: []any{
&labeledExpr{
pos: position{line: 554, col: 19, offset: 17186},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 554, col: 28, offset: 17195},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 554, col: 45, offset: 17212},
label: "v",
expr: &ruleRefExpr{
pos: position{line: 554, col: 47, offset: 17214},
name: "DoubleConstantValue",
},
},
&zeroOrMoreExpr{
pos: position{line: 554, col: 67, offset: 17234},
expr: &ruleRefExpr{
pos: position{line: 554, col: 67, offset: 17234},
name: "Indent",
},
},
},
},
},
},
{
name: "DoubleConstantValue",
pos: position{line: 561, col: 1, offset: 17326},
expr: &actionExpr{
pos: position{line: 561, col: 23, offset: 17348},
run: (*parser).callonDoubleConstantValue1,
expr: &seqExpr{
pos: position{line: 561, col: 23, offset: 17348},
exprs: []any{
&zeroOrOneExpr{
pos: position{line: 561, col: 23, offset: 17348},
expr: &choiceExpr{
pos: position{line: 561, col: 24, offset: 17349},
alternatives: []any{
&litMatcher{
pos: position{line: 561, col: 24, offset: 17349},
val: "+",
ignoreCase: false,
want: "\"+\"",
},
&litMatcher{
pos: position{line: 561, col: 30, offset: 17355},
val: "-",
ignoreCase: false,
want: "\"-\"",
},
},
},
},
&choiceExpr{
pos: position{line: 561, col: 37, offset: 17362},
alternatives: []any{
&seqExpr{
pos: position{line: 561, col: 37, offset: 17362},
exprs: []any{
&zeroOrMoreExpr{
pos: position{line: 561, col: 37, offset: 17362},
expr: &ruleRefExpr{
pos: position{line: 561, col: 37, offset: 17362},
name: "Digit",
},
},
&litMatcher{
pos: position{line: 561, col: 44, offset: 17369},
val: ".",
ignoreCase: false,
want: "\".\"",
},
&oneOrMoreExpr{
pos: position{line: 561, col: 48, offset: 17373},
expr: &ruleRefExpr{
pos: position{line: 561, col: 48, offset: 17373},
name: "Digit",
},
},
&zeroOrOneExpr{
pos: position{line: 561, col: 56, offset: 17381},
expr: &ruleRefExpr{
pos: position{line: 561, col: 56, offset: 17381},
name: "Exponent",
},
},
},
},
&seqExpr{
pos: position{line: 561, col: 68, offset: 17393},
exprs: []any{
&oneOrMoreExpr{
pos: position{line: 561, col: 68, offset: 17393},
expr: &ruleRefExpr{
pos: position{line: 561, col: 68, offset: 17393},
name: "Digit",
},
},
&ruleRefExpr{
pos: position{line: 561, col: 75, offset: 17400},
name: "Exponent",
},
},
},
},
},
},
},
},
},
{
name: "Exponent",
pos: position{line: 572, col: 1, offset: 17622},
expr: &seqExpr{
pos: position{line: 572, col: 12, offset: 17633},
exprs: []any{
&choiceExpr{
pos: position{line: 572, col: 13, offset: 17634},
alternatives: []any{
&litMatcher{
pos: position{line: 572, col: 13, offset: 17634},
val: "e",
ignoreCase: false,
want: "\"e\"",
},
&litMatcher{
pos: position{line: 572, col: 19, offset: 17640},
val: "E",
ignoreCase: false,
want: "\"E\"",
},
},
},
&ruleRefExpr{
pos: position{line: 572, col: 24, offset: 17645},
name: "IntConstant",
},
},
},
},
{
name: "Annotations",
pos: position{line: 574, col: 1, offset: 17658},
expr: &actionExpr{
pos: position{line: 574, col: 16, offset: 17673},
run: (*parser).callonAnnotations1,
expr: &seqExpr{
pos: position{line: 574, col: 16, offset: 17673},
exprs: []any{
&labeledExpr{
pos: position{line: 574, col: 16, offset: 17673},
label: "lpar",
expr: &ruleRefExpr{
pos: position{line: 574, col: 21, offset: 17678},
name: "LPAR",
},
},
&labeledExpr{
pos: position{line: 574, col: 26, offset: 17683},
label: "annos",
expr: &oneOrMoreExpr{
pos: position{line: 574, col: 32, offset: 17689},
expr: &ruleRefExpr{
pos: position{line: 574, col: 32, offset: 17689},
name: "Annotation",
},
},
},
&labeledExpr{
pos: position{line: 574, col: 44, offset: 17701},
label: "rpar",
expr: &ruleRefExpr{
pos: position{line: 574, col: 49, offset: 17706},
name: "RPAR",
},
},
},
},
},
},
{
name: "Annotation",
pos: position{line: 578, col: 1, offset: 17839},
expr: &actionExpr{
pos: position{line: 578, col: 15, offset: 17853},
run: (*parser).callonAnnotation1,
expr: &seqExpr{
pos: position{line: 578, col: 15, offset: 17853},
exprs: []any{
&labeledExpr{
pos: position{line: 578, col: 15, offset: 17853},
label: "id",
expr: &ruleRefExpr{
pos: position{line: 578, col: 18, offset: 17856},
name: "Identifier",
},
},
&labeledExpr{
pos: position{line: 578, col: 29, offset: 17867},
label: "eq",
expr: &ruleRefExpr{
pos: position{line: 578, col: 32, offset: 17870},
name: "EQUAL",
},
},
&labeledExpr{
pos: position{line: 578, col: 38, offset: 17876},
label: "value",
expr: &ruleRefExpr{
pos: position{line: 578, col: 44, offset: 17882},
name: "Literal",
},
},
&labeledExpr{
pos: position{line: 578, col: 52, offset: 17890},
label: "sep",
expr: &zeroOrOneExpr{
pos: position{line: 578, col: 56, offset: 17894},
expr: &ruleRefExpr{
pos: position{line: 578, col: 56, offset: 17894},
name: "ListSeparator",
},
},
},
},
},
},
},
{
name: "ConstList",
pos: position{line: 582, col: 1, offset: 18053},
expr: &actionExpr{
pos: position{line: 582, col: 14, offset: 18066},
run: (*parser).callonConstList1,
expr: &seqExpr{
pos: position{line: 582, col: 14, offset: 18066},
exprs: []any{
&labeledExpr{
pos: position{line: 582, col: 14, offset: 18066},
label: "lbrk",
expr: &ruleRefExpr{
pos: position{line: 582, col: 19, offset: 18071},
name: "LBRK",
},
},
&labeledExpr{
pos: position{line: 582, col: 24, offset: 18076},
label: "v",
expr: &zeroOrMoreExpr{
pos: position{line: 582, col: 26, offset: 18078},
expr: &ruleRefExpr{
pos: position{line: 582, col: 26, offset: 18078},
name: "ConstListItem",
},
},
},
&labeledExpr{
pos: position{line: 582, col: 41, offset: 18093},
label: "rbrk",
expr: &ruleRefExpr{
pos: position{line: 582, col: 46, offset: 18098},
name: "RBRK",
},
},
},
},
},
},
{
name: "ConstListItem",
pos: position{line: 591, col: 1, offset: 18280},
expr: &actionExpr{
pos: position{line: 591, col: 17, offset: 18296},
run: (*parser).callonConstListItem1,
expr: &seqExpr{
pos: position{line: 591, col: 17, offset: 18296},
exprs: []any{
&labeledExpr{
pos: position{line: 591, col: 17, offset: 18296},
label: "v",
expr: &ruleRefExpr{
pos: position{line: 591, col: 19, offset: 18298},
name: "ConstValue",
},
},
&labeledExpr{
pos: position{line: 591, col: 30, offset: 18309},
label: "sep",
expr: &zeroOrOneExpr{
pos: position{line: 591, col: 34, offset: 18313},
expr: &ruleRefExpr{
pos: position{line: 591, col: 34, offset: 18313},
name: "ListSeparator",
},
},
},
},
},
},
},
{
name: "ConstMap",
pos: position{line: 601, col: 1, offset: 18450},
expr: &actionExpr{
pos: position{line: 601, col: 13, offset: 18462},
run: (*parser).callonConstMap1,
expr: &seqExpr{
pos: position{line: 601, col: 13, offset: 18462},
exprs: []any{
&labeledExpr{
pos: position{line: 601, col: 13, offset: 18462},
label: "lcur",
expr: &ruleRefExpr{
pos: position{line: 601, col: 18, offset: 18467},
name: "LCUR",
},
},
&labeledExpr{
pos: position{line: 601, col: 23, offset: 18472},
label: "v",
expr: &zeroOrMoreExpr{
pos: position{line: 601, col: 25, offset: 18474},
expr: &ruleRefExpr{
pos: position{line: 601, col: 25, offset: 18474},
name: "ConstMapItem",
},
},
},
&labeledExpr{
pos: position{line: 601, col: 39, offset: 18488},
label: "rcur",
expr: &ruleRefExpr{
pos: position{line: 601, col: 44, offset: 18493},
name: "RCUR",
},
},
},
},
},
},
{
name: "ConstMapItem",
pos: position{line: 610, col: 1, offset: 18674},
expr: &actionExpr{
pos: position{line: 610, col: 16, offset: 18689},
run: (*parser).callonConstMapItem1,
expr: &seqExpr{
pos: position{line: 610, col: 16, offset: 18689},
exprs: []any{
&labeledExpr{
pos: position{line: 610, col: 16, offset: 18689},
label: "key",
expr: &ruleRefExpr{
pos: position{line: 610, col: 20, offset: 18693},
name: "ConstValue",
},
},
&labeledExpr{
pos: position{line: 610, col: 31, offset: 18704},
label: "colon",
expr: &ruleRefExpr{
pos: position{line: 610, col: 37, offset: 18710},
name: "COLON",
},
},
&labeledExpr{
pos: position{line: 610, col: 43, offset: 18716},
label: "value",
expr: &ruleRefExpr{
pos: position{line: 610, col: 49, offset: 18722},
name: "ConstValue",
},
},
&labeledExpr{
pos: position{line: 610, col: 60, offset: 18733},
label: "sep",
expr: &zeroOrOneExpr{
pos: position{line: 610, col: 64, offset: 18737},
expr: &ruleRefExpr{
pos: position{line: 610, col: 64, offset: 18737},
name: "ListSeparator",
},
},
},
},
},
},
},
{
name: "EscapeLiteralChar",
pos: position{line: 621, col: 1, offset: 18983},
expr: &actionExpr{
pos: position{line: 621, col: 21, offset: 19003},
run: (*parser).callonEscapeLiteralChar1,
expr: &seqExpr{
pos: position{line: 621, col: 21, offset: 19003},
exprs: []any{
&litMatcher{
pos: position{line: 621, col: 21, offset: 19003},
val: "\\",
ignoreCase: false,
want: "\"\\\\\"",
},
&charClassMatcher{
pos: position{line: 621, col: 26, offset: 19008},
val: "[\"']",
chars: []rune{'"', '\''},
ignoreCase: false,
inverted: false,
},
},
},
},
},
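// Literal: a double- or single-quoted string (Literal1 / Literal2), with
// recovery rules for unterminated and otherwise malformed literals.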
{
name: "Literal",
pos: position{line: 625, col: 1, offset: 19046},
expr: &recoveryExpr{
pos: position{line: 625, col: 11, offset: 19056},
expr: &recoveryExpr{
pos: position{line: 625, col: 11, offset: 19056},
expr: &recoveryExpr{
pos: position{line: 625, col: 11, offset: 19056},
expr: &recoveryExpr{
pos: position{line: 625, col: 11, offset: 19056},
expr: &actionExpr{
pos: position{line: 625, col: 11, offset: 19056},
run: (*parser).callonLiteral5,
expr: &labeledExpr{
pos: position{line: 625, col: 11, offset: 19056},
label: "l",
expr: &choiceExpr{
pos: position{line: 625, col: 14, offset: 19059},
alternatives: []any{
&ruleRefExpr{
pos: position{line: 625, col: 14, offset: 19059},
name: "Literal1",
},
&ruleRefExpr{
pos: position{line: 625, col: 25, offset: 19070},
name: "Literal2",
},
},
},
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 627, col: 31, offset: 19127},
name: "ErrLiteral1MissingRight",
},
failureLabel: []string{
"errLiteral1MissingRight",
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 627, col: 71, offset: 19167},
name: "ErrLiteral1",
},
failureLabel: []string{
"errLiteral1",
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 627, col: 111, offset: 19207},
name: "ErrLiteral2MissingRight",
},
failureLabel: []string{
"errLiteral2MissingRight",
},
},
recoverExpr: &ruleRefExpr{
pos: position{line: 627, col: 151, offset: 19247},
name: "ErrLiteral2",
},
failureLabel: []string{
"errLiteral2",
},
},
},
{
name: "Literal1",
pos: position{line: 629, col: 1, offset: 19260},
expr: &choiceExpr{
pos: position{line: 629, col: 12, offset: 19271},
alternatives: []any{
&actionExpr{
pos: position{line: 629, col: 12, offset: 19271},
run: (*parser).callonLiteral12,
expr: &seqExpr{
pos: position{line: 629, col: 12, offset: 19271},
exprs: []any{
&labeledExpr{
pos: position{line: 629, col: 12, offset: 19271},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 629, col: 21, offset: 19280},
name: "ReservedComments",
},
},
&litMatcher{
pos: position{line: 629, col: 38, offset: 19297},
val: "\"",
ignoreCase: false,
want: "\"\\\"\"",
},
&labeledExpr{
pos: position{line: 629, col: 42, offset: 19301},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 629, col: 44, offset: 19303},
name: "Literal1Val",
},
},
&litMatcher{
pos: position{line: 629, col: 56, offset: 19315},
val: "\"",
ignoreCase: false,
want: "\"\\\"\"",
},
&zeroOrMoreExpr{
pos: position{line: 629, col: 60, offset: 19319},
expr: &ruleRefExpr{
pos: position{line: 629, col: 60, offset: 19319},
name: "Indent",
},
},
},
},
},
&actionExpr{
pos: position{line: 631, col: 5, offset: 19432},
run: (*parser).callonLiteral112,
expr: &labeledExpr{
pos: position{line: 631, col: 5, offset: 19432},
label: "x",
expr: &seqExpr{
pos: position{line: 631, col: 8, offset: 19435},
exprs: []any{
&andExpr{
pos: position{line: 631, col: 8, offset: 19435},
expr: &seqExpr{
pos: position{line: 631, col: 10, offset: 19437},
exprs: []any{
&ruleRefExpr{
pos: position{line: 631, col: 10, offset: 19437},
name: "ReservedComments",
},
&litMatcher{
pos: position{line: 631, col: 27, offset: 19454},
val: "\"",
ignoreCase: false,
want: "\"\\\"\"",
},
&labeledExpr{
pos: position{line: 631, col: 31, offset: 19458},
label: "t",
expr: &zeroOrMoreExpr{
pos: position{line: 631, col: 33, offset: 19460},
expr: &choiceExpr{
pos: position{line: 631, col: 34, offset: 19461},
alternatives: []any{
&ruleRefExpr{
pos: position{line: 631, col: 34, offset: 19461},
name: "EscapeLiteralChar",
},
&seqExpr{
pos: position{line: 631, col: 54, offset: 19481},
exprs: []any{
&notExpr{
pos: position{line: 631, col: 54, offset: 19481},
expr: &litMatcher{
pos: position{line: 631, col: 55, offset: 19482},
val: "\"",
ignoreCase: false,
want: "\"\\\"\"",
},
},
&anyMatcher{
line: 631, col: 59, offset: 19486,
},
},
},
},
},
},
},
&zeroOrMoreExpr{
pos: position{line: 631, col: 63, offset: 19490},
expr: &ruleRefExpr{
pos: position{line: 631, col: 63, offset: 19490},
name: "Indent",
},
},
},
},
},
&throwExpr{
pos: position{line: 631, col: 72, offset: 19499},
label: "errLiteral1MissingRight",
},
},
},
},
},
},
},
},
{
name: "Literal2",
pos: position{line: 635, col: 1, offset: 19560},
expr: &choiceExpr{
pos: position{line: 635, col: 12, offset: 19571},
alternatives: []any{
&actionExpr{
pos: position{line: 635, col: 12, offset: 19571},
run: (*parser).callonLiteral22,
expr: &seqExpr{
pos: position{line: 635, col: 12, offset: 19571},
exprs: []any{
&labeledExpr{
pos: position{line: 635, col: 12, offset: 19571},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 635, col: 21, offset: 19580},
name: "ReservedComments",
},
},
&litMatcher{
pos: position{line: 635, col: 38, offset: 19597},
val: "'",
ignoreCase: false,
want: "\"'\"",
},
&labeledExpr{
pos: position{line: 635, col: 42, offset: 19601},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 635, col: 44, offset: 19603},
name: "Literal2Val",
},
},
&litMatcher{
pos: position{line: 635, col: 56, offset: 19615},
val: "'",
ignoreCase: false,
want: "\"'\"",
},
&zeroOrMoreExpr{
pos: position{line: 635, col: 60, offset: 19619},
expr: &ruleRefExpr{
pos: position{line: 635, col: 60, offset: 19619},
name: "Indent",
},
},
},
},
},
&actionExpr{
pos: position{line: 637, col: 5, offset: 19731},
run: (*parser).callonLiteral212,
expr: &labeledExpr{
pos: position{line: 637, col: 5, offset: 19731},
label: "x",
expr: &seqExpr{
pos: position{line: 637, col: 8, offset: 19734},
exprs: []any{
&andExpr{
pos: position{line: 637, col: 8, offset: 19734},
expr: &seqExpr{
pos: position{line: 637, col: 10, offset: 19736},
exprs: []any{
&ruleRefExpr{
pos: position{line: 637, col: 10, offset: 19736},
name: "ReservedComments",
},
&litMatcher{
pos: position{line: 637, col: 27, offset: 19753},
val: "'",
ignoreCase: false,
want: "\"'\"",
},
&labeledExpr{
pos: position{line: 637, col: 31, offset: 19757},
label: "t",
expr: &zeroOrMoreExpr{
pos: position{line: 637, col: 33, offset: 19759},
expr: &choiceExpr{
pos: position{line: 637, col: 34, offset: 19760},
alternatives: []any{
&ruleRefExpr{
pos: position{line: 637, col: 34, offset: 19760},
name: "EscapeLiteralChar",
},
&seqExpr{
pos: position{line: 637, col: 54, offset: 19780},
exprs: []any{
&notExpr{
pos: position{line: 637, col: 54, offset: 19780},
expr: &litMatcher{
pos: position{line: 637, col: 55, offset: 19781},
val: "'",
ignoreCase: false,
want: "\"'\"",
},
},
&anyMatcher{
line: 637, col: 59, offset: 19785,
},
},
},
},
},
},
},
&zeroOrMoreExpr{
pos: position{line: 637, col: 63, offset: 19789},
expr: &ruleRefExpr{
pos: position{line: 637, col: 63, offset: 19789},
name: "Indent",
},
},
},
},
},
&throwExpr{
pos: position{line: 637, col: 72, offset: 19798},
label: "errLiteral2MissingRight",
},
},
},
},
},
},
},
},
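// Literal1Val consumes the body of a double-quoted literal: escape sequences or any character other than '"', '\r', '\n'.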
{
name: "Literal1Val",
pos: position{line: 641, col: 1, offset: 19859},
expr: &actionExpr{
pos: position{line: 641, col: 15, offset: 19873},
run: (*parser).callonLiteral1Val1,
expr: &zeroOrMoreExpr{
pos: position{line: 641, col: 15, offset: 19873},
expr: &choiceExpr{
pos: position{line: 641, col: 16, offset: 19874},
alternatives: []any{
&ruleRefExpr{
pos: position{line: 641, col: 16, offset: 19874},
name: "EscapeLiteralChar",
},
&seqExpr{
pos: position{line: 641, col: 36, offset: 19894},
exprs: []any{
&notExpr{
pos: position{line: 641, col: 36, offset: 19894},
expr: &charClassMatcher{
pos: position{line: 641, col: 37, offset: 19895},
val: "[\"\\r\\n]",
chars: []rune{'"', '\r', '\n'},
ignoreCase: false,
inverted: false,
},
},
&anyMatcher{
line: 641, col: 45, offset: 19903,
},
},
},
},
},
},
},
},
{
name: "Literal2Val",
pos: position{line: 645, col: 1, offset: 19984},
expr: &actionExpr{
pos: position{line: 645, col: 15, offset: 19998},
run: (*parser).callonLiteral2Val1,
expr: &zeroOrMoreExpr{
pos: position{line: 645, col: 15, offset: 19998},
expr: &choiceExpr{
pos: position{line: 645, col: 16, offset: 19999},
alternatives: []any{
&ruleRefExpr{
pos: position{line: 645, col: 16, offset: 19999},
name: "EscapeLiteralChar",
},
&seqExpr{
pos: position{line: 645, col: 36, offset: 20019},
exprs: []any{
&notExpr{
pos: position{line: 645, col: 36, offset: 20019},
expr: &charClassMatcher{
pos: position{line: 645, col: 37, offset: 20020},
val: "['\\r\\n]",
chars: []rune{'\'', '\r', '\n'},
ignoreCase: false,
inverted: false,
},
},
&anyMatcher{
line: 645, col: 45, offset: 20028,
},
},
},
},
},
},
},
},
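// DefinitionIdentifier expects an Identifier and throws errIdentifier when it is missing, so the caller can report the missing name.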
{
name: "DefinitionIdentifier",
pos: position{line: 649, col: 1, offset: 20109},
expr: &choiceExpr{
pos: position{line: 649, col: 24, offset: 20132},
alternatives: []any{
&actionExpr{
pos: position{line: 649, col: 24, offset: 20132},
run: (*parser).callonDefinitionIdentifier2,
expr: &labeledExpr{
pos: position{line: 649, col: 24, offset: 20132},
label: "id",
expr: &ruleRefExpr{
pos: position{line: 649, col: 27, offset: 20135},
name: "Identifier",
},
},
},
&throwExpr{
pos: position{line: 651, col: 5, offset: 20182},
label: "errIdentifier",
},
},
},
},
{
name: "Identifier",
pos: position{line: 653, col: 1, offset: 20200},
expr: &actionExpr{
pos: position{line: 653, col: 14, offset: 20213},
run: (*parser).callonIdentifier1,
expr: &seqExpr{
pos: position{line: 653, col: 14, offset: 20213},
exprs: []any{
&labeledExpr{
pos: position{line: 653, col: 14, offset: 20213},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 653, col: 23, offset: 20222},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 653, col: 40, offset: 20239},
label: "id",
expr: &ruleRefExpr{
pos: position{line: 653, col: 43, offset: 20242},
name: "IdentifierToken",
},
},
&zeroOrMoreExpr{
pos: position{line: 653, col: 59, offset: 20258},
expr: &ruleRefExpr{
pos: position{line: 653, col: 59, offset: 20258},
name: "Indent",
},
},
},
},
},
},
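// IdentifierToken is a Letter followed by any mix of Letters, Digits, and '.'.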
{
name: "IdentifierToken",
pos: position{line: 659, col: 1, offset: 20389},
expr: &actionExpr{
pos: position{line: 659, col: 19, offset: 20407},
run: (*parser).callonIdentifierToken1,
expr: &seqExpr{
pos: position{line: 659, col: 19, offset: 20407},
exprs: []any{
&ruleRefExpr{
pos: position{line: 659, col: 19, offset: 20407},
name: "Letter",
},
&zeroOrMoreExpr{
pos: position{line: 659, col: 26, offset: 20414},
expr: &choiceExpr{
pos: position{line: 659, col: 28, offset: 20416},
alternatives: []any{
&ruleRefExpr{
pos: position{line: 659, col: 28, offset: 20416},
name: "Letter",
},
&ruleRefExpr{
pos: position{line: 659, col: 37, offset: 20425},
name: "Digit",
},
&litMatcher{
pos: position{line: 659, col: 45, offset: 20433},
val: ".",
ignoreCase: false,
want: "\".\"",
},
},
},
},
},
},
},
},
{
name: "ListSeparator",
pos: position{line: 663, col: 1, offset: 20520},
expr: &actionExpr{
pos: position{line: 663, col: 17, offset: 20536},
run: (*parser).callonListSeparator1,
expr: &seqExpr{
pos: position{line: 663, col: 17, offset: 20536},
exprs: []any{
&labeledExpr{
pos: position{line: 663, col: 17, offset: 20536},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 663, col: 26, offset: 20545},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 663, col: 43, offset: 20562},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 663, col: 45, offset: 20564},
name: "ListSeparatorToken",
},
},
&zeroOrMoreExpr{
pos: position{line: 663, col: 64, offset: 20583},
expr: &ruleRefExpr{
pos: position{line: 663, col: 64, offset: 20583},
name: "Indent",
},
},
},
},
},
},
{
name: "ListSeparatorToken",
pos: position{line: 669, col: 1, offset: 20734},
expr: &actionExpr{
pos: position{line: 669, col: 22, offset: 20755},
run: (*parser).callonListSeparatorToken1,
expr: &choiceExpr{
pos: position{line: 669, col: 23, offset: 20756},
alternatives: []any{
&litMatcher{
pos: position{line: 669, col: 23, offset: 20756},
val: ",",
ignoreCase: false,
want: "\",\"",
},
&litMatcher{
pos: position{line: 669, col: 29, offset: 20762},
val: ";",
ignoreCase: false,
want: "\";\"",
},
},
},
},
},
{
name: "Letter",
pos: position{line: 673, col: 1, offset: 20806},
expr: &choiceExpr{
pos: position{line: 673, col: 10, offset: 20815},
alternatives: []any{
&charClassMatcher{
pos: position{line: 673, col: 10, offset: 20815},
val: "[A-Z]",
ranges: []rune{'A', 'Z'},
ignoreCase: false,
inverted: false,
},
&charClassMatcher{
pos: position{line: 673, col: 18, offset: 20823},
val: "[a-z]",
ranges: []rune{'a', 'z'},
ignoreCase: false,
inverted: false,
},
&actionExpr{
pos: position{line: 673, col: 26, offset: 20831},
run: (*parser).callonLetter4,
expr: &litMatcher{
pos: position{line: 673, col: 26, offset: 20831},
val: "_",
ignoreCase: false,
want: "\"_\"",
},
},
},
},
},
{
name: "LetterOrDigit",
pos: position{line: 676, col: 1, offset: 20867},
expr: &choiceExpr{
pos: position{line: 676, col: 17, offset: 20883},
alternatives: []any{
&charClassMatcher{
pos: position{line: 676, col: 17, offset: 20883},
val: "[a-z]",
ranges: []rune{'a', 'z'},
ignoreCase: false,
inverted: false,
},
&charClassMatcher{
pos: position{line: 676, col: 25, offset: 20891},
val: "[A-Z]",
ranges: []rune{'A', 'Z'},
ignoreCase: false,
inverted: false,
},
&charClassMatcher{
pos: position{line: 676, col: 33, offset: 20899},
val: "[0-9]",
ranges: []rune{'0', '9'},
ignoreCase: false,
inverted: false,
},
&actionExpr{
pos: position{line: 676, col: 41, offset: 20907},
run: (*parser).callonLetterOrDigit5,
expr: &charClassMatcher{
pos: position{line: 676, col: 41, offset: 20907},
val: "[_$]",
chars: []rune{'_', '$'},
ignoreCase: false,
inverted: false,
},
},
},
},
},
{
name: "Digit",
pos: position{line: 680, col: 1, offset: 20945},
expr: &actionExpr{
pos: position{line: 680, col: 9, offset: 20953},
run: (*parser).callonDigit1,
expr: &charClassMatcher{
pos: position{line: 680, col: 9, offset: 20953},
val: "[0-9]",
ranges: []rune{'0', '9'},
ignoreCase: false,
inverted: false,
},
},
},
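// ReservedComments collects the run of whitespace and comments that may precede a token.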
{
name: "ReservedComments",
pos: position{line: 684, col: 1, offset: 20992},
expr: &actionExpr{
pos: position{line: 684, col: 20, offset: 21011},
run: (*parser).callonReservedComments1,
expr: &labeledExpr{
pos: position{line: 684, col: 20, offset: 21011},
label: "comments",
expr: &zeroOrMoreExpr{
pos: position{line: 684, col: 29, offset: 21020},
expr: &choiceExpr{
pos: position{line: 684, col: 30, offset: 21021},
alternatives: []any{
&ruleRefExpr{
pos: position{line: 684, col: 30, offset: 21021},
name: "Space",
},
&ruleRefExpr{
pos: position{line: 684, col: 38, offset: 21029},
name: "Comment",
},
},
},
},
},
},
},
{
name: "ReservedEndLineComments",
pos: position{line: 687, col: 1, offset: 21081},
expr: &actionExpr{
pos: position{line: 687, col: 27, offset: 21107},
run: (*parser).callonReservedEndLineComments1,
expr: &labeledExpr{
pos: position{line: 687, col: 27, offset: 21107},
label: "comments",
expr: &zeroOrMoreExpr{
pos: position{line: 687, col: 36, offset: 21116},
expr: &choiceExpr{
pos: position{line: 687, col: 37, offset: 21117},
alternatives: []any{
&ruleRefExpr{
pos: position{line: 687, col: 37, offset: 21117},
name: "Indent",
},
&ruleRefExpr{
pos: position{line: 687, col: 46, offset: 21126},
name: "Comment",
},
},
},
},
},
},
},
{
name: "Space",
pos: position{line: 691, col: 1, offset: 21179},
expr: &actionExpr{
pos: position{line: 691, col: 9, offset: 21187},
run: (*parser).callonSpace1,
expr: &oneOrMoreExpr{
pos: position{line: 691, col: 9, offset: 21187},
expr: &choiceExpr{
pos: position{line: 691, col: 10, offset: 21188},
alternatives: []any{
&ruleRefExpr{
pos: position{line: 691, col: 10, offset: 21188},
name: "Indent",
},
&ruleRefExpr{
pos: position{line: 691, col: 19, offset: 21197},
name: "CarriageReturnLineFeed",
},
},
},
},
},
},
{
name: "Indent",
pos: position{line: 694, col: 1, offset: 21242},
expr: &actionExpr{
pos: position{line: 694, col: 10, offset: 21251},
run: (*parser).callonIndent1,
expr: &charClassMatcher{
pos: position{line: 694, col: 10, offset: 21251},
val: "[ \\t\\v]",
chars: []rune{' ', '\t', '\v'},
ignoreCase: false,
inverted: false,
},
},
},
{
name: "CarriageReturnLineFeed",
pos: position{line: 697, col: 1, offset: 21279},
expr: &charClassMatcher{
pos: position{line: 697, col: 26, offset: 21304},
val: "[\\r\\n]",
chars: []rune{'\r', '\n'},
ignoreCase: false,
inverted: false,
},
},
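// Comment matches a block comment (/* ... */), a line comment (// ...), or a unix-style comment (# ...).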
{
name: "Comment",
pos: position{line: 699, col: 1, offset: 21312},
expr: &actionExpr{
pos: position{line: 699, col: 11, offset: 21322},
run: (*parser).callonComment1,
expr: &labeledExpr{
pos: position{line: 699, col: 11, offset: 21322},
label: "v",
expr: &choiceExpr{
pos: position{line: 699, col: 14, offset: 21325},
alternatives: []any{
&ruleRefExpr{
pos: position{line: 699, col: 14, offset: 21325},
name: "LongComment",
},
&ruleRefExpr{
pos: position{line: 699, col: 28, offset: 21339},
name: "LineComment",
},
&ruleRefExpr{
pos: position{line: 699, col: 42, offset: 21353},
name: "UnixComment",
},
},
},
},
},
},
{
name: "LongComment",
pos: position{line: 702, col: 1, offset: 21396},
expr: &actionExpr{
pos: position{line: 702, col: 15, offset: 21410},
run: (*parser).callonLongComment1,
expr: &seqExpr{
pos: position{line: 702, col: 15, offset: 21410},
exprs: []any{
&litMatcher{
pos: position{line: 702, col: 15, offset: 21410},
val: "/*",
ignoreCase: false,
want: "\"/*\"",
},
&ruleRefExpr{
pos: position{line: 702, col: 20, offset: 21415},
name: "LongCommentMatch",
},
&litMatcher{
pos: position{line: 702, col: 37, offset: 21432},
val: "*/",
ignoreCase: false,
want: "\"*/\"",
},
},
},
},
},
{
name: "LongCommentMatch",
pos: position{line: 705, col: 1, offset: 21531},
expr: &actionExpr{
pos: position{line: 705, col: 20, offset: 21550},
run: (*parser).callonLongCommentMatch1,
expr: &zeroOrMoreExpr{
pos: position{line: 705, col: 20, offset: 21550},
expr: &seqExpr{
pos: position{line: 705, col: 21, offset: 21551},
exprs: []any{
&notExpr{
pos: position{line: 705, col: 21, offset: 21551},
expr: &litMatcher{
pos: position{line: 705, col: 22, offset: 21552},
val: "*/",
ignoreCase: false,
want: "\"*/\"",
},
},
&anyMatcher{
line: 705, col: 27, offset: 21557,
},
},
},
},
},
},
{
name: "LineComment",
pos: position{line: 709, col: 1, offset: 21594},
expr: &actionExpr{
pos: position{line: 709, col: 15, offset: 21608},
run: (*parser).callonLineComment1,
expr: &seqExpr{
pos: position{line: 709, col: 15, offset: 21608},
exprs: []any{
&litMatcher{
pos: position{line: 709, col: 15, offset: 21608},
val: "//",
ignoreCase: false,
want: "\"//\"",
},
&ruleRefExpr{
pos: position{line: 709, col: 20, offset: 21613},
name: "LineCommentMatch",
},
},
},
},
},
{
name: "LineCommentMatch",
pos: position{line: 712, col: 1, offset: 21725},
expr: &actionExpr{
pos: position{line: 712, col: 20, offset: 21744},
run: (*parser).callonLineCommentMatch1,
expr: &zeroOrMoreExpr{
pos: position{line: 712, col: 20, offset: 21744},
expr: &seqExpr{
pos: position{line: 712, col: 21, offset: 21745},
exprs: []any{
&notExpr{
pos: position{line: 712, col: 21, offset: 21745},
expr: &charClassMatcher{
pos: position{line: 712, col: 22, offset: 21746},
val: "[\\r\\n]",
chars: []rune{'\r', '\n'},
ignoreCase: false,
inverted: false,
},
},
&anyMatcher{
line: 712, col: 29, offset: 21753,
},
},
},
},
},
},
{
name: "UnixComment",
pos: position{line: 716, col: 1, offset: 21790},
expr: &actionExpr{
pos: position{line: 716, col: 15, offset: 21804},
run: (*parser).callonUnixComment1,
expr: &seqExpr{
pos: position{line: 716, col: 15, offset: 21804},
exprs: []any{
&litMatcher{
pos: position{line: 716, col: 15, offset: 21804},
val: "#",
ignoreCase: false,
want: "\"#\"",
},
&ruleRefExpr{
pos: position{line: 716, col: 19, offset: 21808},
name: "UnixCommentMatch",
},
},
},
},
},
{
name: "UnixCommentMatch",
pos: position{line: 719, col: 1, offset: 21915},
expr: &actionExpr{
pos: position{line: 719, col: 20, offset: 21934},
run: (*parser).callonUnixCommentMatch1,
expr: &zeroOrMoreExpr{
pos: position{line: 719, col: 20, offset: 21934},
expr: &seqExpr{
pos: position{line: 719, col: 21, offset: 21935},
exprs: []any{
&notExpr{
pos: position{line: 719, col: 21, offset: 21935},
expr: &charClassMatcher{
pos: position{line: 719, col: 22, offset: 21936},
val: "[\\r\\n]",
chars: []rune{'\r', '\n'},
ignoreCase: false,
inverted: false,
},
},
&anyMatcher{
line: 719, col: 29, offset: 21943,
},
},
},
},
},
},
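// Keyword and base-type rules below (BOOL through CPPTYPE) share one shape: leading ReservedComments, the literal token, a !LetterOrDigit guard so a keyword never matches the prefix of an identifier, then trailing Indent.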
{
name: "BOOL",
pos: position{line: 723, col: 1, offset: 21981},
expr: &actionExpr{
pos: position{line: 723, col: 8, offset: 21988},
run: (*parser).callonBOOL1,
expr: &seqExpr{
pos: position{line: 723, col: 8, offset: 21988},
exprs: []any{
&labeledExpr{
pos: position{line: 723, col: 8, offset: 21988},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 723, col: 17, offset: 21997},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 723, col: 34, offset: 22014},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 723, col: 36, offset: 22016},
name: "BOOLToken",
},
},
&notExpr{
pos: position{line: 723, col: 53, offset: 22033},
expr: &ruleRefExpr{
pos: position{line: 723, col: 54, offset: 22034},
name: "LetterOrDigit",
},
},
&zeroOrMoreExpr{
pos: position{line: 723, col: 69, offset: 22049},
expr: &ruleRefExpr{
pos: position{line: 723, col: 69, offset: 22049},
name: "Indent",
},
},
},
},
},
},
{
name: "BOOLToken",
pos: position{line: 729, col: 1, offset: 22136},
expr: &actionExpr{
pos: position{line: 729, col: 14, offset: 22149},
run: (*parser).callonBOOLToken1,
expr: &litMatcher{
pos: position{line: 729, col: 14, offset: 22149},
val: "bool",
ignoreCase: false,
want: "\"bool\"",
},
},
},
{
name: "BYTE",
pos: position{line: 733, col: 1, offset: 22209},
expr: &actionExpr{
pos: position{line: 733, col: 8, offset: 22216},
run: (*parser).callonBYTE1,
expr: &seqExpr{
pos: position{line: 733, col: 8, offset: 22216},
exprs: []any{
&labeledExpr{
pos: position{line: 733, col: 8, offset: 22216},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 733, col: 17, offset: 22225},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 733, col: 34, offset: 22242},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 733, col: 36, offset: 22244},
name: "BYTEToken",
},
},
&notExpr{
pos: position{line: 733, col: 53, offset: 22261},
expr: &ruleRefExpr{
pos: position{line: 733, col: 54, offset: 22262},
name: "LetterOrDigit",
},
},
&zeroOrMoreExpr{
pos: position{line: 733, col: 69, offset: 22277},
expr: &ruleRefExpr{
pos: position{line: 733, col: 69, offset: 22277},
name: "Indent",
},
},
},
},
},
},
{
name: "BYTEToken",
pos: position{line: 739, col: 1, offset: 22364},
expr: &actionExpr{
pos: position{line: 739, col: 13, offset: 22376},
run: (*parser).callonBYTEToken1,
expr: &litMatcher{
pos: position{line: 739, col: 13, offset: 22376},
val: "byte",
ignoreCase: false,
want: "\"byte\"",
},
},
},
{
name: "I8",
pos: position{line: 743, col: 1, offset: 22436},
expr: &actionExpr{
pos: position{line: 743, col: 6, offset: 22441},
run: (*parser).callonI81,
expr: &seqExpr{
pos: position{line: 743, col: 6, offset: 22441},
exprs: []any{
&labeledExpr{
pos: position{line: 743, col: 6, offset: 22441},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 743, col: 15, offset: 22450},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 743, col: 32, offset: 22467},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 743, col: 34, offset: 22469},
name: "I8Token",
},
},
&notExpr{
pos: position{line: 743, col: 51, offset: 22486},
expr: &ruleRefExpr{
pos: position{line: 743, col: 52, offset: 22487},
name: "LetterOrDigit",
},
},
&zeroOrMoreExpr{
pos: position{line: 743, col: 67, offset: 22502},
expr: &ruleRefExpr{
pos: position{line: 743, col: 67, offset: 22502},
name: "Indent",
},
},
},
},
},
},
{
name: "I8Token",
pos: position{line: 749, col: 1, offset: 22589},
expr: &actionExpr{
pos: position{line: 749, col: 11, offset: 22599},
run: (*parser).callonI8Token1,
expr: &litMatcher{
pos: position{line: 749, col: 11, offset: 22599},
val: "i8",
ignoreCase: false,
want: "\"i8\"",
},
},
},
{
name: "I16",
pos: position{line: 754, col: 1, offset: 22658},
expr: &actionExpr{
pos: position{line: 754, col: 7, offset: 22664},
run: (*parser).callonI161,
expr: &seqExpr{
pos: position{line: 754, col: 7, offset: 22664},
exprs: []any{
&labeledExpr{
pos: position{line: 754, col: 7, offset: 22664},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 754, col: 16, offset: 22673},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 754, col: 33, offset: 22690},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 754, col: 35, offset: 22692},
name: "I16Token",
},
},
&notExpr{
pos: position{line: 754, col: 52, offset: 22709},
expr: &ruleRefExpr{
pos: position{line: 754, col: 53, offset: 22710},
name: "LetterOrDigit",
},
},
&zeroOrMoreExpr{
pos: position{line: 754, col: 68, offset: 22725},
expr: &ruleRefExpr{
pos: position{line: 754, col: 68, offset: 22725},
name: "Indent",
},
},
},
},
},
},
{
name: "I16Token",
pos: position{line: 760, col: 1, offset: 22812},
expr: &actionExpr{
pos: position{line: 760, col: 12, offset: 22823},
run: (*parser).callonI16Token1,
expr: &litMatcher{
pos: position{line: 760, col: 12, offset: 22823},
val: "i16",
ignoreCase: false,
want: "\"i16\"",
},
},
},
{
name: "I32",
pos: position{line: 764, col: 1, offset: 22882},
expr: &actionExpr{
pos: position{line: 764, col: 7, offset: 22888},
run: (*parser).callonI321,
expr: &seqExpr{
pos: position{line: 764, col: 7, offset: 22888},
exprs: []any{
&labeledExpr{
pos: position{line: 764, col: 7, offset: 22888},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 764, col: 16, offset: 22897},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 764, col: 33, offset: 22914},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 764, col: 35, offset: 22916},
name: "I32Token",
},
},
&notExpr{
pos: position{line: 764, col: 52, offset: 22933},
expr: &ruleRefExpr{
pos: position{line: 764, col: 53, offset: 22934},
name: "LetterOrDigit",
},
},
&zeroOrMoreExpr{
pos: position{line: 764, col: 68, offset: 22949},
expr: &ruleRefExpr{
pos: position{line: 764, col: 68, offset: 22949},
name: "Indent",
},
},
},
},
},
},
{
name: "I32Token",
pos: position{line: 770, col: 1, offset: 23036},
expr: &actionExpr{
pos: position{line: 770, col: 12, offset: 23047},
run: (*parser).callonI32Token1,
expr: &litMatcher{
pos: position{line: 770, col: 12, offset: 23047},
val: "i32",
ignoreCase: false,
want: "\"i32\"",
},
},
},
{
name: "I64",
pos: position{line: 774, col: 1, offset: 23106},
expr: &actionExpr{
pos: position{line: 774, col: 7, offset: 23112},
run: (*parser).callonI641,
expr: &seqExpr{
pos: position{line: 774, col: 7, offset: 23112},
exprs: []any{
&labeledExpr{
pos: position{line: 774, col: 7, offset: 23112},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 774, col: 16, offset: 23121},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 774, col: 33, offset: 23138},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 774, col: 35, offset: 23140},
name: "I64Token",
},
},
&notExpr{
pos: position{line: 774, col: 52, offset: 23157},
expr: &ruleRefExpr{
pos: position{line: 774, col: 53, offset: 23158},
name: "LetterOrDigit",
},
},
&zeroOrMoreExpr{
pos: position{line: 774, col: 68, offset: 23173},
expr: &ruleRefExpr{
pos: position{line: 774, col: 68, offset: 23173},
name: "Indent",
},
},
},
},
},
},
{
name: "I64Token",
pos: position{line: 780, col: 1, offset: 23260},
expr: &actionExpr{
pos: position{line: 780, col: 12, offset: 23271},
run: (*parser).callonI64Token1,
expr: &litMatcher{
pos: position{line: 780, col: 12, offset: 23271},
val: "i64",
ignoreCase: false,
want: "\"i64\"",
},
},
},
{
name: "DOUBLE",
pos: position{line: 784, col: 1, offset: 23330},
expr: &actionExpr{
pos: position{line: 784, col: 10, offset: 23339},
run: (*parser).callonDOUBLE1,
expr: &seqExpr{
pos: position{line: 784, col: 10, offset: 23339},
exprs: []any{
&labeledExpr{
pos: position{line: 784, col: 10, offset: 23339},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 784, col: 19, offset: 23348},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 784, col: 36, offset: 23365},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 784, col: 38, offset: 23367},
name: "DOUBLEToken",
},
},
&notExpr{
pos: position{line: 784, col: 55, offset: 23384},
expr: &ruleRefExpr{
pos: position{line: 784, col: 56, offset: 23385},
name: "LetterOrDigit",
},
},
&zeroOrMoreExpr{
pos: position{line: 784, col: 71, offset: 23400},
expr: &ruleRefExpr{
pos: position{line: 784, col: 71, offset: 23400},
name: "Indent",
},
},
},
},
},
},
{
name: "DOUBLEToken",
pos: position{line: 790, col: 1, offset: 23487},
expr: &actionExpr{
pos: position{line: 790, col: 15, offset: 23501},
run: (*parser).callonDOUBLEToken1,
expr: &litMatcher{
pos: position{line: 790, col: 15, offset: 23501},
val: "double",
ignoreCase: false,
want: "\"double\"",
},
},
},
{
name: "STRING",
pos: position{line: 794, col: 1, offset: 23563},
expr: &actionExpr{
pos: position{line: 794, col: 10, offset: 23572},
run: (*parser).callonSTRING1,
expr: &seqExpr{
pos: position{line: 794, col: 10, offset: 23572},
exprs: []any{
&labeledExpr{
pos: position{line: 794, col: 10, offset: 23572},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 794, col: 19, offset: 23581},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 794, col: 36, offset: 23598},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 794, col: 38, offset: 23600},
name: "STRINGToken",
},
},
&notExpr{
pos: position{line: 794, col: 55, offset: 23617},
expr: &ruleRefExpr{
pos: position{line: 794, col: 56, offset: 23618},
name: "LetterOrDigit",
},
},
&zeroOrMoreExpr{
pos: position{line: 794, col: 71, offset: 23633},
expr: &ruleRefExpr{
pos: position{line: 794, col: 71, offset: 23633},
name: "Indent",
},
},
},
},
},
},
{
name: "STRINGToken",
pos: position{line: 800, col: 1, offset: 23720},
expr: &actionExpr{
pos: position{line: 800, col: 15, offset: 23734},
run: (*parser).callonSTRINGToken1,
expr: &litMatcher{
pos: position{line: 800, col: 15, offset: 23734},
val: "string",
ignoreCase: false,
want: "\"string\"",
},
},
},
{
name: "BINARY",
pos: position{line: 804, col: 1, offset: 23796},
expr: &actionExpr{
pos: position{line: 804, col: 10, offset: 23805},
run: (*parser).callonBINARY1,
expr: &seqExpr{
pos: position{line: 804, col: 10, offset: 23805},
exprs: []any{
&labeledExpr{
pos: position{line: 804, col: 10, offset: 23805},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 804, col: 19, offset: 23814},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 804, col: 36, offset: 23831},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 804, col: 38, offset: 23833},
name: "BINARYToken",
},
},
&notExpr{
pos: position{line: 804, col: 55, offset: 23850},
expr: &ruleRefExpr{
pos: position{line: 804, col: 56, offset: 23851},
name: "LetterOrDigit",
},
},
&zeroOrMoreExpr{
pos: position{line: 804, col: 71, offset: 23866},
expr: &ruleRefExpr{
pos: position{line: 804, col: 71, offset: 23866},
name: "Indent",
},
},
},
},
},
},
{
name: "BINARYToken",
pos: position{line: 810, col: 1, offset: 23953},
expr: &actionExpr{
pos: position{line: 810, col: 15, offset: 23967},
run: (*parser).callonBINARYToken1,
expr: &litMatcher{
pos: position{line: 810, col: 15, offset: 23967},
val: "binary",
ignoreCase: false,
want: "\"binary\"",
},
},
},
{
name: "UUID",
pos: position{line: 814, col: 1, offset: 24029},
expr: &actionExpr{
pos: position{line: 814, col: 8, offset: 24036},
run: (*parser).callonUUID1,
expr: &seqExpr{
pos: position{line: 814, col: 8, offset: 24036},
exprs: []any{
&labeledExpr{
pos: position{line: 814, col: 8, offset: 24036},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 814, col: 17, offset: 24045},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 814, col: 34, offset: 24062},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 814, col: 36, offset: 24064},
name: "UUIDToken",
},
},
&notExpr{
pos: position{line: 814, col: 51, offset: 24079},
expr: &ruleRefExpr{
pos: position{line: 814, col: 52, offset: 24080},
name: "LetterOrDigit",
},
},
&zeroOrMoreExpr{
pos: position{line: 814, col: 67, offset: 24095},
expr: &ruleRefExpr{
pos: position{line: 814, col: 67, offset: 24095},
name: "Indent",
},
},
},
},
},
},
{
name: "UUIDToken",
pos: position{line: 820, col: 1, offset: 24182},
expr: &actionExpr{
pos: position{line: 820, col: 13, offset: 24194},
run: (*parser).callonUUIDToken1,
expr: &litMatcher{
pos: position{line: 820, col: 13, offset: 24194},
val: "uuid",
ignoreCase: false,
want: "\"uuid\"",
},
},
},
{
name: "MAP",
pos: position{line: 824, col: 1, offset: 24254},
expr: &actionExpr{
pos: position{line: 824, col: 7, offset: 24260},
run: (*parser).callonMAP1,
expr: &seqExpr{
pos: position{line: 824, col: 7, offset: 24260},
exprs: []any{
&labeledExpr{
pos: position{line: 824, col: 7, offset: 24260},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 824, col: 16, offset: 24269},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 824, col: 33, offset: 24286},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 824, col: 35, offset: 24288},
name: "MAPToken",
},
},
&notExpr{
pos: position{line: 824, col: 54, offset: 24307},
expr: &ruleRefExpr{
pos: position{line: 824, col: 55, offset: 24308},
name: "LetterOrDigit",
},
},
&zeroOrMoreExpr{
pos: position{line: 824, col: 70, offset: 24323},
expr: &ruleRefExpr{
pos: position{line: 824, col: 70, offset: 24323},
name: "Indent",
},
},
},
},
},
},
{
name: "MAPToken",
pos: position{line: 830, col: 1, offset: 24410},
expr: &actionExpr{
pos: position{line: 830, col: 12, offset: 24421},
run: (*parser).callonMAPToken1,
expr: &litMatcher{
pos: position{line: 830, col: 12, offset: 24421},
val: "map",
ignoreCase: false,
want: "\"map\"",
},
},
},
{
name: "SET",
pos: position{line: 834, col: 1, offset: 24480},
expr: &actionExpr{
pos: position{line: 834, col: 7, offset: 24486},
run: (*parser).callonSET1,
expr: &seqExpr{
pos: position{line: 834, col: 7, offset: 24486},
exprs: []any{
&labeledExpr{
pos: position{line: 834, col: 7, offset: 24486},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 834, col: 16, offset: 24495},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 834, col: 33, offset: 24512},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 834, col: 35, offset: 24514},
name: "SETToken",
},
},
&notExpr{
pos: position{line: 834, col: 54, offset: 24533},
expr: &ruleRefExpr{
pos: position{line: 834, col: 55, offset: 24534},
name: "LetterOrDigit",
},
},
&zeroOrMoreExpr{
pos: position{line: 834, col: 70, offset: 24549},
expr: &ruleRefExpr{
pos: position{line: 834, col: 70, offset: 24549},
name: "Indent",
},
},
},
},
},
},
{
name: "SETToken",
pos: position{line: 840, col: 1, offset: 24636},
expr: &actionExpr{
pos: position{line: 840, col: 12, offset: 24647},
run: (*parser).callonSETToken1,
expr: &litMatcher{
pos: position{line: 840, col: 12, offset: 24647},
val: "set",
ignoreCase: false,
want: "\"set\"",
},
},
},
{
name: "LIST",
pos: position{line: 844, col: 1, offset: 24706},
expr: &actionExpr{
pos: position{line: 844, col: 8, offset: 24713},
run: (*parser).callonLIST1,
expr: &seqExpr{
pos: position{line: 844, col: 8, offset: 24713},
exprs: []any{
&labeledExpr{
pos: position{line: 844, col: 8, offset: 24713},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 844, col: 17, offset: 24722},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 844, col: 34, offset: 24739},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 844, col: 36, offset: 24741},
name: "ListToken",
},
},
&notExpr{
pos: position{line: 844, col: 55, offset: 24760},
expr: &ruleRefExpr{
pos: position{line: 844, col: 56, offset: 24761},
name: "LetterOrDigit",
},
},
&zeroOrMoreExpr{
pos: position{line: 844, col: 71, offset: 24776},
expr: &ruleRefExpr{
pos: position{line: 844, col: 71, offset: 24776},
name: "Indent",
},
},
},
},
},
},
{
name: "ListToken",
pos: position{line: 850, col: 1, offset: 24863},
expr: &actionExpr{
pos: position{line: 850, col: 13, offset: 24875},
run: (*parser).callonListToken1,
expr: &litMatcher{
pos: position{line: 850, col: 13, offset: 24875},
val: "list",
ignoreCase: false,
want: "\"list\"",
},
},
},
{
name: "CONST",
pos: position{line: 854, col: 1, offset: 24935},
expr: &actionExpr{
pos: position{line: 854, col: 9, offset: 24943},
run: (*parser).callonCONST1,
expr: &seqExpr{
pos: position{line: 854, col: 9, offset: 24943},
exprs: []any{
&labeledExpr{
pos: position{line: 854, col: 9, offset: 24943},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 854, col: 18, offset: 24952},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 854, col: 35, offset: 24969},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 854, col: 37, offset: 24971},
name: "CONSTToken",
},
},
&notExpr{
pos: position{line: 854, col: 56, offset: 24990},
expr: &ruleRefExpr{
pos: position{line: 854, col: 57, offset: 24991},
name: "LetterOrDigit",
},
},
&zeroOrMoreExpr{
pos: position{line: 854, col: 72, offset: 25006},
expr: &ruleRefExpr{
pos: position{line: 854, col: 72, offset: 25006},
name: "Indent",
},
},
},
},
},
},
{
name: "CONSTToken",
pos: position{line: 860, col: 1, offset: 25149},
expr: &actionExpr{
pos: position{line: 860, col: 14, offset: 25162},
run: (*parser).callonCONSTToken1,
expr: &litMatcher{
pos: position{line: 860, col: 14, offset: 25162},
val: "const",
ignoreCase: false,
want: "\"const\"",
},
},
},
{
name: "ONEWAY",
pos: position{line: 864, col: 1, offset: 25209},
expr: &actionExpr{
pos: position{line: 864, col: 10, offset: 25218},
run: (*parser).callonONEWAY1,
expr: &seqExpr{
pos: position{line: 864, col: 10, offset: 25218},
exprs: []any{
&labeledExpr{
pos: position{line: 864, col: 10, offset: 25218},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 864, col: 19, offset: 25227},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 864, col: 36, offset: 25244},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 864, col: 38, offset: 25246},
name: "ONEWAYToken",
},
},
&notExpr{
pos: position{line: 864, col: 57, offset: 25265},
expr: &ruleRefExpr{
pos: position{line: 864, col: 58, offset: 25266},
name: "LetterOrDigit",
},
},
&zeroOrMoreExpr{
pos: position{line: 864, col: 73, offset: 25281},
expr: &ruleRefExpr{
pos: position{line: 864, col: 73, offset: 25281},
name: "Indent",
},
},
},
},
},
},
{
name: "ONEWAYToken",
pos: position{line: 870, col: 1, offset: 25425},
expr: &actionExpr{
pos: position{line: 870, col: 15, offset: 25439},
run: (*parser).callonONEWAYToken1,
expr: &litMatcher{
pos: position{line: 870, col: 15, offset: 25439},
val: "oneway",
ignoreCase: false,
want: "\"oneway\"",
},
},
},
{
name: "TYPEDEF",
pos: position{line: 874, col: 1, offset: 25487},
expr: &actionExpr{
pos: position{line: 874, col: 11, offset: 25497},
run: (*parser).callonTYPEDEF1,
expr: &seqExpr{
pos: position{line: 874, col: 11, offset: 25497},
exprs: []any{
&labeledExpr{
pos: position{line: 874, col: 11, offset: 25497},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 874, col: 20, offset: 25506},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 874, col: 37, offset: 25523},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 874, col: 39, offset: 25525},
name: "TYPEDEFToken",
},
},
&notExpr{
pos: position{line: 874, col: 56, offset: 25542},
expr: &ruleRefExpr{
pos: position{line: 874, col: 57, offset: 25543},
name: "LetterOrDigit",
},
},
&zeroOrMoreExpr{
pos: position{line: 874, col: 72, offset: 25558},
expr: &ruleRefExpr{
pos: position{line: 874, col: 72, offset: 25558},
name: "Indent",
},
},
},
},
},
},
{
name: "TYPEDEFToken",
pos: position{line: 880, col: 1, offset: 25703},
expr: &actionExpr{
pos: position{line: 880, col: 16, offset: 25718},
run: (*parser).callonTYPEDEFToken1,
expr: &litMatcher{
pos: position{line: 880, col: 16, offset: 25718},
val: "typedef",
ignoreCase: false,
want: "\"typedef\"",
},
},
},
{
name: "VOID",
pos: position{line: 885, col: 1, offset: 25768},
expr: &actionExpr{
pos: position{line: 885, col: 15, offset: 25782},
run: (*parser).callonVOID1,
expr: &seqExpr{
pos: position{line: 885, col: 15, offset: 25782},
exprs: []any{
&labeledExpr{
pos: position{line: 885, col: 15, offset: 25782},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 885, col: 24, offset: 25791},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 885, col: 41, offset: 25808},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 885, col: 43, offset: 25810},
name: "VOIDToken",
},
},
&notExpr{
pos: position{line: 885, col: 61, offset: 25828},
expr: &ruleRefExpr{
pos: position{line: 885, col: 62, offset: 25829},
name: "LetterOrDigit",
},
},
&zeroOrMoreExpr{
pos: position{line: 885, col: 77, offset: 25844},
expr: &ruleRefExpr{
pos: position{line: 885, col: 77, offset: 25844},
name: "Indent",
},
},
},
},
},
},
{
name: "VOIDToken",
pos: position{line: 890, col: 1, offset: 25985},
expr: &actionExpr{
pos: position{line: 890, col: 13, offset: 25997},
run: (*parser).callonVOIDToken1,
expr: &litMatcher{
pos: position{line: 890, col: 13, offset: 25997},
val: "void",
ignoreCase: false,
want: "\"void\"",
},
},
},
{
name: "THROWS",
pos: position{line: 894, col: 1, offset: 26043},
expr: &actionExpr{
pos: position{line: 894, col: 15, offset: 26057},
run: (*parser).callonTHROWS1,
expr: &seqExpr{
pos: position{line: 894, col: 15, offset: 26057},
exprs: []any{
&labeledExpr{
pos: position{line: 894, col: 15, offset: 26057},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 894, col: 24, offset: 26066},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 894, col: 41, offset: 26083},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 894, col: 43, offset: 26085},
name: "THROWSToken",
},
},
&notExpr{
pos: position{line: 894, col: 62, offset: 26104},
expr: &ruleRefExpr{
pos: position{line: 894, col: 63, offset: 26105},
name: "LetterOrDigit",
},
},
&zeroOrMoreExpr{
pos: position{line: 894, col: 78, offset: 26120},
expr: &ruleRefExpr{
pos: position{line: 894, col: 78, offset: 26120},
name: "Indent",
},
},
},
},
},
},
{
name: "THROWSToken",
pos: position{line: 899, col: 1, offset: 26263},
expr: &actionExpr{
pos: position{line: 899, col: 15, offset: 26277},
run: (*parser).callonTHROWSToken1,
expr: &litMatcher{
pos: position{line: 899, col: 15, offset: 26277},
val: "throws",
ignoreCase: false,
want: "\"throws\"",
},
},
},
{
name: "EXCEPTION",
pos: position{line: 903, col: 1, offset: 26325},
expr: &actionExpr{
pos: position{line: 903, col: 15, offset: 26339},
run: (*parser).callonEXCEPTION1,
expr: &seqExpr{
pos: position{line: 903, col: 15, offset: 26339},
exprs: []any{
&labeledExpr{
pos: position{line: 903, col: 15, offset: 26339},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 903, col: 24, offset: 26348},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 903, col: 41, offset: 26365},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 903, col: 43, offset: 26367},
name: "EXCEPTIONToken",
},
},
&notExpr{
pos: position{line: 903, col: 62, offset: 26386},
expr: &ruleRefExpr{
pos: position{line: 903, col: 63, offset: 26387},
name: "LetterOrDigit",
},
},
&zeroOrMoreExpr{
pos: position{line: 903, col: 78, offset: 26402},
expr: &ruleRefExpr{
pos: position{line: 903, col: 78, offset: 26402},
name: "Indent",
},
},
},
},
},
},
{
name: "EXCEPTIONToken",
pos: position{line: 908, col: 1, offset: 26548},
expr: &actionExpr{
pos: position{line: 908, col: 18, offset: 26565},
run: (*parser).callonEXCEPTIONToken1,
expr: &litMatcher{
pos: position{line: 908, col: 18, offset: 26565},
val: "exception",
ignoreCase: false,
want: "\"exception\"",
},
},
},
{
name: "EXTENDS",
pos: position{line: 913, col: 1, offset: 26617},
expr: &actionExpr{
pos: position{line: 913, col: 15, offset: 26631},
run: (*parser).callonEXTENDS1,
expr: &seqExpr{
pos: position{line: 913, col: 15, offset: 26631},
exprs: []any{
&labeledExpr{
pos: position{line: 913, col: 15, offset: 26631},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 913, col: 24, offset: 26640},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 913, col: 41, offset: 26657},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 913, col: 43, offset: 26659},
name: "EXTENDSToken",
},
},
&notExpr{
pos: position{line: 913, col: 62, offset: 26678},
expr: &ruleRefExpr{
pos: position{line: 913, col: 63, offset: 26679},
name: "LetterOrDigit",
},
},
&zeroOrMoreExpr{
pos: position{line: 913, col: 78, offset: 26694},
expr: &ruleRefExpr{
pos: position{line: 913, col: 78, offset: 26694},
name: "Indent",
},
},
},
},
},
},
{
name: "EXTENDSToken",
pos: position{line: 918, col: 1, offset: 26838},
expr: &actionExpr{
pos: position{line: 918, col: 16, offset: 26853},
run: (*parser).callonEXTENDSToken1,
expr: &litMatcher{
pos: position{line: 918, col: 16, offset: 26853},
val: "extends",
ignoreCase: false,
want: "\"extends\"",
},
},
},
{
name: "SERVICE",
pos: position{line: 922, col: 1, offset: 26902},
expr: &actionExpr{
pos: position{line: 922, col: 15, offset: 26916},
run: (*parser).callonSERVICE1,
expr: &seqExpr{
pos: position{line: 922, col: 15, offset: 26916},
exprs: []any{
&labeledExpr{
pos: position{line: 922, col: 15, offset: 26916},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 922, col: 24, offset: 26925},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 922, col: 41, offset: 26942},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 922, col: 43, offset: 26944},
name: "SERVICEToken",
},
},
&notExpr{
pos: position{line: 922, col: 62, offset: 26963},
expr: &ruleRefExpr{
pos: position{line: 922, col: 63, offset: 26964},
name: "LetterOrDigit",
},
},
&zeroOrMoreExpr{
pos: position{line: 922, col: 78, offset: 26979},
expr: &ruleRefExpr{
pos: position{line: 922, col: 78, offset: 26979},
name: "Indent",
},
},
},
},
},
},
{
name: "SERVICEToken",
pos: position{line: 927, col: 1, offset: 27123},
expr: &actionExpr{
pos: position{line: 927, col: 16, offset: 27138},
run: (*parser).callonSERVICEToken1,
expr: &litMatcher{
pos: position{line: 927, col: 16, offset: 27138},
val: "service",
ignoreCase: false,
want: "\"service\"",
},
},
},
{
name: "STRUCT",
pos: position{line: 931, col: 1, offset: 27187},
expr: &actionExpr{
pos: position{line: 931, col: 15, offset: 27201},
run: (*parser).callonSTRUCT1,
expr: &seqExpr{
pos: position{line: 931, col: 15, offset: 27201},
exprs: []any{
&labeledExpr{
pos: position{line: 931, col: 15, offset: 27201},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 931, col: 24, offset: 27210},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 931, col: 41, offset: 27227},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 931, col: 43, offset: 27229},
name: "STRUCTToken",
},
},
&notExpr{
pos: position{line: 931, col: 62, offset: 27248},
expr: &ruleRefExpr{
pos: position{line: 931, col: 63, offset: 27249},
name: "LetterOrDigit",
},
},
&zeroOrMoreExpr{
pos: position{line: 931, col: 78, offset: 27264},
expr: &ruleRefExpr{
pos: position{line: 931, col: 78, offset: 27264},
name: "Indent",
},
},
},
},
},
},
{
name: "STRUCTToken",
pos: position{line: 936, col: 1, offset: 27407},
expr: &actionExpr{
pos: position{line: 936, col: 15, offset: 27421},
run: (*parser).callonSTRUCTToken1,
expr: &litMatcher{
pos: position{line: 936, col: 15, offset: 27421},
val: "struct",
ignoreCase: false,
want: "\"struct\"",
},
},
},
{
name: "UNION",
pos: position{line: 940, col: 1, offset: 27469},
expr: &actionExpr{
pos: position{line: 940, col: 15, offset: 27483},
run: (*parser).callonUNION1,
expr: &seqExpr{
pos: position{line: 940, col: 15, offset: 27483},
exprs: []any{
&labeledExpr{
pos: position{line: 940, col: 15, offset: 27483},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 940, col: 24, offset: 27492},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 940, col: 41, offset: 27509},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 940, col: 43, offset: 27511},
name: "UNIONToken",
},
},
&notExpr{
pos: position{line: 940, col: 61, offset: 27529},
expr: &ruleRefExpr{
pos: position{line: 940, col: 62, offset: 27530},
name: "LetterOrDigit",
},
},
&zeroOrMoreExpr{
pos: position{line: 940, col: 77, offset: 27545},
expr: &ruleRefExpr{
pos: position{line: 940, col: 77, offset: 27545},
name: "Indent",
},
},
},
},
},
},
{
name: "UNIONToken",
pos: position{line: 945, col: 1, offset: 27687},
expr: &actionExpr{
pos: position{line: 945, col: 14, offset: 27700},
run: (*parser).callonUNIONToken1,
expr: &litMatcher{
pos: position{line: 945, col: 14, offset: 27700},
val: "union",
ignoreCase: false,
want: "\"union\"",
},
},
},
{
name: "ENUM",
pos: position{line: 949, col: 1, offset: 27747},
expr: &actionExpr{
pos: position{line: 949, col: 15, offset: 27761},
run: (*parser).callonENUM1,
expr: &seqExpr{
pos: position{line: 949, col: 15, offset: 27761},
exprs: []any{
&labeledExpr{
pos: position{line: 949, col: 15, offset: 27761},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 949, col: 24, offset: 27770},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 949, col: 41, offset: 27787},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 949, col: 43, offset: 27789},
name: "ENUMToken",
},
},
&notExpr{
pos: position{line: 949, col: 62, offset: 27808},
expr: &ruleRefExpr{
pos: position{line: 949, col: 63, offset: 27809},
name: "LetterOrDigit",
},
},
&zeroOrMoreExpr{
pos: position{line: 949, col: 78, offset: 27824},
expr: &ruleRefExpr{
pos: position{line: 949, col: 78, offset: 27824},
name: "Indent",
},
},
},
},
},
},
{
name: "ENUMToken",
pos: position{line: 954, col: 1, offset: 27965},
expr: &actionExpr{
pos: position{line: 954, col: 13, offset: 27977},
run: (*parser).callonENUMToken1,
expr: &litMatcher{
pos: position{line: 954, col: 13, offset: 27977},
val: "enum",
ignoreCase: false,
want: "\"enum\"",
},
},
},
{
name: "INCLUDE",
pos: position{line: 958, col: 1, offset: 28023},
expr: &actionExpr{
pos: position{line: 958, col: 15, offset: 28037},
run: (*parser).callonINCLUDE1,
expr: &seqExpr{
pos: position{line: 958, col: 15, offset: 28037},
exprs: []any{
&labeledExpr{
pos: position{line: 958, col: 15, offset: 28037},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 958, col: 24, offset: 28046},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 958, col: 41, offset: 28063},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 958, col: 43, offset: 28065},
name: "INCLUDEToken",
},
},
&notExpr{
pos: position{line: 958, col: 62, offset: 28084},
expr: &ruleRefExpr{
pos: position{line: 958, col: 63, offset: 28085},
name: "LetterOrDigit",
},
},
&zeroOrMoreExpr{
pos: position{line: 958, col: 78, offset: 28100},
expr: &ruleRefExpr{
pos: position{line: 958, col: 78, offset: 28100},
name: "Indent",
},
},
},
},
},
},
{
name: "INCLUDEToken",
pos: position{line: 963, col: 1, offset: 28244},
expr: &actionExpr{
pos: position{line: 963, col: 16, offset: 28259},
run: (*parser).callonINCLUDEToken1,
expr: &litMatcher{
pos: position{line: 963, col: 16, offset: 28259},
val: "include",
ignoreCase: false,
want: "\"include\"",
},
},
},
{
name: "CPPINCLUDE",
pos: position{line: 967, col: 1, offset: 28308},
expr: &actionExpr{
pos: position{line: 967, col: 15, offset: 28322},
run: (*parser).callonCPPINCLUDE1,
expr: &seqExpr{
pos: position{line: 967, col: 15, offset: 28322},
exprs: []any{
&labeledExpr{
pos: position{line: 967, col: 15, offset: 28322},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 967, col: 24, offset: 28331},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 967, col: 41, offset: 28348},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 967, col: 43, offset: 28350},
name: "CPPINCLUDEToken",
},
},
&notExpr{
pos: position{line: 967, col: 61, offset: 28368},
expr: &ruleRefExpr{
pos: position{line: 967, col: 62, offset: 28369},
name: "LetterOrDigit",
},
},
&zeroOrMoreExpr{
pos: position{line: 967, col: 77, offset: 28384},
expr: &ruleRefExpr{
pos: position{line: 967, col: 77, offset: 28384},
name: "Indent",
},
},
},
},
},
},
{
name: "CPPINCLUDEToken",
pos: position{line: 972, col: 1, offset: 28531},
expr: &actionExpr{
pos: position{line: 972, col: 19, offset: 28549},
run: (*parser).callonCPPINCLUDEToken1,
expr: &litMatcher{
pos: position{line: 972, col: 19, offset: 28549},
val: "cpp_include",
ignoreCase: false,
want: "\"cpp_include\"",
},
},
},
{
name: "NAMESPACE",
pos: position{line: 976, col: 1, offset: 28602},
expr: &actionExpr{
pos: position{line: 976, col: 15, offset: 28616},
run: (*parser).callonNAMESPACE1,
expr: &seqExpr{
pos: position{line: 976, col: 15, offset: 28616},
exprs: []any{
&labeledExpr{
pos: position{line: 976, col: 15, offset: 28616},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 976, col: 24, offset: 28625},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 976, col: 41, offset: 28642},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 976, col: 43, offset: 28644},
name: "NAMESPACEToken",
},
},
&notExpr{
pos: position{line: 976, col: 62, offset: 28663},
expr: &ruleRefExpr{
pos: position{line: 976, col: 63, offset: 28664},
name: "LetterOrDigit",
},
},
&zeroOrMoreExpr{
pos: position{line: 976, col: 78, offset: 28679},
expr: &ruleRefExpr{
pos: position{line: 976, col: 78, offset: 28679},
name: "Indent",
},
},
},
},
},
},
{
name: "NAMESPACEToken",
pos: position{line: 981, col: 1, offset: 28825},
expr: &actionExpr{
pos: position{line: 981, col: 18, offset: 28842},
run: (*parser).callonNAMESPACEToken1,
expr: &litMatcher{
pos: position{line: 981, col: 18, offset: 28842},
val: "namespace",
ignoreCase: false,
want: "\"namespace\"",
},
},
},
{
name: "CPPTYPE",
pos: position{line: 986, col: 1, offset: 28894},
expr: &actionExpr{
pos: position{line: 986, col: 15, offset: 28908},
run: (*parser).callonCPPTYPE1,
expr: &seqExpr{
pos: position{line: 986, col: 15, offset: 28908},
exprs: []any{
&labeledExpr{
pos: position{line: 986, col: 15, offset: 28908},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 986, col: 24, offset: 28917},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 986, col: 41, offset: 28934},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 986, col: 43, offset: 28936},
name: "CPPTYPEToken",
},
},
&notExpr{
pos: position{line: 986, col: 61, offset: 28954},
expr: &ruleRefExpr{
pos: position{line: 986, col: 62, offset: 28955},
name: "LetterOrDigit",
},
},
&zeroOrMoreExpr{
pos: position{line: 986, col: 77, offset: 28970},
expr: &ruleRefExpr{
pos: position{line: 986, col: 77, offset: 28970},
name: "Indent",
},
},
},
},
},
},
{
name: "CPPTYPEToken",
pos: position{line: 991, col: 1, offset: 29114},
expr: &actionExpr{
pos: position{line: 991, col: 16, offset: 29129},
run: (*parser).callonCPPTYPEToken1,
expr: &litMatcher{
pos: position{line: 991, col: 16, offset: 29129},
val: "cpp_type",
ignoreCase: false,
want: "\"cpp_type\"",
},
},
},
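// Punctuation rules (LBRK through COLON) follow the same pattern but drop the !LetterOrDigit guard, since a punctuation token cannot be the prefix of an identifier.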
{
name: "LBRK",
pos: position{line: 996, col: 1, offset: 29180},
expr: &actionExpr{
pos: position{line: 996, col: 15, offset: 29194},
run: (*parser).callonLBRK1,
expr: &seqExpr{
pos: position{line: 996, col: 15, offset: 29194},
exprs: []any{
&labeledExpr{
pos: position{line: 996, col: 15, offset: 29194},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 996, col: 24, offset: 29203},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 996, col: 41, offset: 29220},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 996, col: 43, offset: 29222},
name: "LBRKToken",
},
},
&zeroOrMoreExpr{
pos: position{line: 996, col: 57, offset: 29236},
expr: &ruleRefExpr{
pos: position{line: 996, col: 57, offset: 29236},
name: "Indent",
},
},
},
},
},
},
{
name: "LBRKToken",
pos: position{line: 1001, col: 1, offset: 29377},
expr: &actionExpr{
pos: position{line: 1001, col: 13, offset: 29389},
run: (*parser).callonLBRKToken1,
expr: &litMatcher{
pos: position{line: 1001, col: 13, offset: 29389},
val: "[",
ignoreCase: false,
want: "\"[\"",
},
},
},
{
name: "RBRK",
pos: position{line: 1005, col: 1, offset: 29432},
expr: &actionExpr{
pos: position{line: 1005, col: 15, offset: 29446},
run: (*parser).callonRBRK1,
expr: &seqExpr{
pos: position{line: 1005, col: 15, offset: 29446},
exprs: []any{
&labeledExpr{
pos: position{line: 1005, col: 15, offset: 29446},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 1005, col: 24, offset: 29455},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 1005, col: 41, offset: 29472},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 1005, col: 43, offset: 29474},
name: "RBRKToken",
},
},
&zeroOrMoreExpr{
pos: position{line: 1005, col: 57, offset: 29488},
expr: &ruleRefExpr{
pos: position{line: 1005, col: 57, offset: 29488},
name: "Indent",
},
},
},
},
},
},
{
name: "RBRKToken",
pos: position{line: 1010, col: 1, offset: 29629},
expr: &actionExpr{
pos: position{line: 1010, col: 13, offset: 29641},
run: (*parser).callonRBRKToken1,
expr: &litMatcher{
pos: position{line: 1010, col: 13, offset: 29641},
val: "]",
ignoreCase: false,
want: "\"]\"",
},
},
},
{
name: "LCUR",
pos: position{line: 1014, col: 1, offset: 29684},
expr: &actionExpr{
pos: position{line: 1014, col: 14, offset: 29697},
run: (*parser).callonLCUR1,
expr: &seqExpr{
pos: position{line: 1014, col: 14, offset: 29697},
exprs: []any{
&labeledExpr{
pos: position{line: 1014, col: 14, offset: 29697},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 1014, col: 23, offset: 29706},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 1014, col: 40, offset: 29723},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 1014, col: 42, offset: 29725},
name: "LCURToken",
},
},
&zeroOrMoreExpr{
pos: position{line: 1014, col: 56, offset: 29739},
expr: &ruleRefExpr{
pos: position{line: 1014, col: 56, offset: 29739},
name: "Indent",
},
},
},
},
},
},
{
name: "RCUR",
pos: position{line: 1019, col: 1, offset: 29880},
expr: &actionExpr{
pos: position{line: 1019, col: 8, offset: 29887},
run: (*parser).callonRCUR1,
expr: &seqExpr{
pos: position{line: 1019, col: 8, offset: 29887},
exprs: []any{
&labeledExpr{
pos: position{line: 1019, col: 8, offset: 29887},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 1019, col: 17, offset: 29896},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 1019, col: 34, offset: 29913},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 1019, col: 36, offset: 29915},
name: "RCURToken",
},
},
&zeroOrMoreExpr{
pos: position{line: 1019, col: 50, offset: 29929},
expr: &ruleRefExpr{
pos: position{line: 1019, col: 50, offset: 29929},
name: "Indent",
},
},
},
},
},
},
{
name: "LCURToken",
pos: position{line: 1024, col: 1, offset: 30070},
expr: &actionExpr{
pos: position{line: 1024, col: 13, offset: 30082},
run: (*parser).callonLCURToken1,
expr: &litMatcher{
pos: position{line: 1024, col: 13, offset: 30082},
val: "{",
ignoreCase: false,
want: "\"{\"",
},
},
},
{
name: "RCURToken",
pos: position{line: 1027, col: 1, offset: 30124},
expr: &choiceExpr{
pos: position{line: 1027, col: 13, offset: 30136},
alternatives: []any{
&actionExpr{
pos: position{line: 1027, col: 13, offset: 30136},
run: (*parser).callonRCURToken2,
expr: &litMatcher{
pos: position{line: 1027, col: 13, offset: 30136},
val: "}",
ignoreCase: false,
want: "\"}\"",
},
},
&throwExpr{
pos: position{line: 1029, col: 5, offset: 30180},
label: "errRCUR",
},
},
},
},
{
name: "EQUAL",
pos: position{line: 1032, col: 1, offset: 30193},
expr: &actionExpr{
pos: position{line: 1032, col: 9, offset: 30201},
run: (*parser).callonEQUAL1,
expr: &seqExpr{
pos: position{line: 1032, col: 9, offset: 30201},
exprs: []any{
&labeledExpr{
pos: position{line: 1032, col: 9, offset: 30201},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 1032, col: 18, offset: 30210},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 1032, col: 35, offset: 30227},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 1032, col: 37, offset: 30229},
name: "EQUALToken",
},
},
&zeroOrMoreExpr{
pos: position{line: 1032, col: 52, offset: 30244},
expr: &ruleRefExpr{
pos: position{line: 1032, col: 52, offset: 30244},
name: "Indent",
},
},
},
},
},
},
{
name: "EQUALToken",
pos: position{line: 1037, col: 1, offset: 30386},
expr: &actionExpr{
pos: position{line: 1037, col: 14, offset: 30399},
run: (*parser).callonEQUALToken1,
expr: &litMatcher{
pos: position{line: 1037, col: 14, offset: 30399},
val: "=",
ignoreCase: false,
want: "\"=\"",
},
},
},
{
name: "LPOINT",
pos: position{line: 1041, col: 1, offset: 30442},
expr: &actionExpr{
pos: position{line: 1041, col: 15, offset: 30456},
run: (*parser).callonLPOINT1,
expr: &seqExpr{
pos: position{line: 1041, col: 15, offset: 30456},
exprs: []any{
&labeledExpr{
pos: position{line: 1041, col: 15, offset: 30456},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 1041, col: 24, offset: 30465},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 1041, col: 41, offset: 30482},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 1041, col: 43, offset: 30484},
name: "LPOINTToken",
},
},
&zeroOrMoreExpr{
pos: position{line: 1041, col: 59, offset: 30500},
expr: &ruleRefExpr{
pos: position{line: 1041, col: 59, offset: 30500},
name: "Indent",
},
},
},
},
},
},
{
name: "LPOINTToken",
pos: position{line: 1046, col: 1, offset: 30643},
expr: &actionExpr{
pos: position{line: 1046, col: 15, offset: 30657},
run: (*parser).callonLPOINTToken1,
expr: &litMatcher{
pos: position{line: 1046, col: 15, offset: 30657},
val: "<",
ignoreCase: false,
want: "\"<\"",
},
},
},
{
name: "RPOINT",
pos: position{line: 1050, col: 1, offset: 30700},
expr: &actionExpr{
pos: position{line: 1050, col: 15, offset: 30714},
run: (*parser).callonRPOINT1,
expr: &seqExpr{
pos: position{line: 1050, col: 15, offset: 30714},
exprs: []any{
&labeledExpr{
pos: position{line: 1050, col: 15, offset: 30714},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 1050, col: 24, offset: 30723},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 1050, col: 41, offset: 30740},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 1050, col: 43, offset: 30742},
name: "RPOINTToken",
},
},
&zeroOrMoreExpr{
pos: position{line: 1050, col: 58, offset: 30757},
expr: &ruleRefExpr{
pos: position{line: 1050, col: 58, offset: 30757},
name: "Indent",
},
},
},
},
},
},
{
name: "RPOINTToken",
pos: position{line: 1055, col: 1, offset: 30900},
expr: &actionExpr{
pos: position{line: 1055, col: 15, offset: 30914},
run: (*parser).callonRPOINTToken1,
expr: &litMatcher{
pos: position{line: 1055, col: 15, offset: 30914},
val: ">",
ignoreCase: false,
want: "\">\"",
},
},
},
{
name: "COMMA",
pos: position{line: 1059, col: 1, offset: 30957},
expr: &actionExpr{
pos: position{line: 1059, col: 15, offset: 30971},
run: (*parser).callonCOMMA1,
expr: &seqExpr{
pos: position{line: 1059, col: 15, offset: 30971},
exprs: []any{
&labeledExpr{
pos: position{line: 1059, col: 15, offset: 30971},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 1059, col: 24, offset: 30980},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 1059, col: 41, offset: 30997},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 1059, col: 43, offset: 30999},
name: "COMMAToken",
},
},
&zeroOrMoreExpr{
pos: position{line: 1059, col: 58, offset: 31014},
expr: &ruleRefExpr{
pos: position{line: 1059, col: 58, offset: 31014},
name: "Indent",
},
},
},
},
},
},
{
name: "COMMAToken",
pos: position{line: 1064, col: 1, offset: 31156},
expr: &actionExpr{
pos: position{line: 1064, col: 14, offset: 31169},
run: (*parser).callonCOMMAToken1,
expr: &litMatcher{
pos: position{line: 1064, col: 14, offset: 31169},
val: ",",
ignoreCase: false,
want: "\",\"",
},
},
},
{
name: "LPAR",
pos: position{line: 1068, col: 1, offset: 31212},
expr: &actionExpr{
pos: position{line: 1068, col: 15, offset: 31226},
run: (*parser).callonLPAR1,
expr: &seqExpr{
pos: position{line: 1068, col: 15, offset: 31226},
exprs: []any{
&labeledExpr{
pos: position{line: 1068, col: 15, offset: 31226},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 1068, col: 24, offset: 31235},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 1068, col: 41, offset: 31252},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 1068, col: 43, offset: 31254},
name: "LPARToken",
},
},
&zeroOrMoreExpr{
pos: position{line: 1068, col: 57, offset: 31268},
expr: &ruleRefExpr{
pos: position{line: 1068, col: 57, offset: 31268},
name: "Indent",
},
},
},
},
},
},
{
name: "LPARToken",
pos: position{line: 1073, col: 1, offset: 31409},
expr: &actionExpr{
pos: position{line: 1073, col: 13, offset: 31421},
run: (*parser).callonLPARToken1,
expr: &litMatcher{
pos: position{line: 1073, col: 13, offset: 31421},
val: "(",
ignoreCase: false,
want: "\"(\"",
},
},
},
{
name: "RPAR",
pos: position{line: 1077, col: 1, offset: 31464},
expr: &actionExpr{
pos: position{line: 1077, col: 15, offset: 31478},
run: (*parser).callonRPAR1,
expr: &seqExpr{
pos: position{line: 1077, col: 15, offset: 31478},
exprs: []any{
&labeledExpr{
pos: position{line: 1077, col: 15, offset: 31478},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 1077, col: 24, offset: 31487},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 1077, col: 41, offset: 31504},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 1077, col: 43, offset: 31506},
name: "RPARToken",
},
},
&zeroOrMoreExpr{
pos: position{line: 1077, col: 57, offset: 31520},
expr: &ruleRefExpr{
pos: position{line: 1077, col: 57, offset: 31520},
name: "Indent",
},
},
},
},
},
},
{
name: "RPARToken",
pos: position{line: 1082, col: 1, offset: 31661},
expr: &actionExpr{
pos: position{line: 1082, col: 13, offset: 31673},
run: (*parser).callonRPARToken1,
expr: &litMatcher{
pos: position{line: 1082, col: 13, offset: 31673},
val: ")",
ignoreCase: false,
want: "\")\"",
},
},
},
{
name: "COLON",
pos: position{line: 1086, col: 1, offset: 31716},
expr: &actionExpr{
pos: position{line: 1086, col: 15, offset: 31730},
run: (*parser).callonCOLON1,
expr: &seqExpr{
pos: position{line: 1086, col: 15, offset: 31730},
exprs: []any{
&labeledExpr{
pos: position{line: 1086, col: 15, offset: 31730},
label: "comments",
expr: &ruleRefExpr{
pos: position{line: 1086, col: 24, offset: 31739},
name: "ReservedComments",
},
},
&labeledExpr{
pos: position{line: 1086, col: 41, offset: 31756},
label: "t",
expr: &ruleRefExpr{
pos: position{line: 1086, col: 43, offset: 31758},
name: "COLONToken",
},
},
&zeroOrMoreExpr{
pos: position{line: 1086, col: 58, offset: 31773},
expr: &ruleRefExpr{
pos: position{line: 1086, col: 58, offset: 31773},
name: "Indent",
},
},
},
},
},
},
{
name: "COLONToken",
pos: position{line: 1091, col: 1, offset: 31915},
expr: &actionExpr{
pos: position{line: 1091, col: 14, offset: 31928},
run: (*parser).callonCOLONToken1,
expr: &litMatcher{
pos: position{line: 1091, col: 14, offset: 31928},
val: ":",
ignoreCase: false,
want: "\":\"",
},
},
},
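// DefinitionStart matches any keyword that can begin a top-level definition
// (struct, union, exception, enum, service, const, typedef). The Err*
// recovery rules below use it as a synchronization point.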
{
name: "DefinitionStart",
pos: position{line: 1095, col: 1, offset: 31971},
expr: &choiceExpr{
pos: position{line: 1095, col: 19, offset: 31989},
alternatives: []any{
&ruleRefExpr{
pos: position{line: 1095, col: 19, offset: 31989},
name: "STRUCT",
},
&ruleRefExpr{
pos: position{line: 1095, col: 28, offset: 31998},
name: "UNION",
},
&ruleRefExpr{
pos: position{line: 1095, col: 36, offset: 32006},
name: "EXCEPTION",
},
&ruleRefExpr{
pos: position{line: 1095, col: 48, offset: 32018},
name: "ENUM",
},
&ruleRefExpr{
pos: position{line: 1095, col: 55, offset: 32025},
name: "SERVICE",
},
&ruleRefExpr{
pos: position{line: 1095, col: 65, offset: 32035},
name: "CONST",
},
&ruleRefExpr{
pos: position{line: 1095, col: 73, offset: 32043},
name: "TYPEDEF",
},
},
},
},
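// The Err* rules implement error recovery: each one first runs a state-code
// hook (presumably to record a diagnostic) and then consumes input until a
// synchronization token is reached, such as ':', '{', '}', a line break, or
// the start of the next definition.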
{
name: "ErrFieldIndex",
pos: position{line: 1097, col: 1, offset: 32052},
expr: &actionExpr{
pos: position{line: 1097, col: 17, offset: 32068},
run: (*parser).callonErrFieldIndex1,
expr: &seqExpr{
pos: position{line: 1097, col: 17, offset: 32068},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1097, col: 17, offset: 32068},
run: (*parser).callonErrFieldIndex3,
},
&zeroOrMoreExpr{
pos: position{line: 1099, col: 3, offset: 32104},
expr: &seqExpr{
pos: position{line: 1099, col: 4, offset: 32105},
exprs: []any{
&notExpr{
pos: position{line: 1099, col: 4, offset: 32105},
expr: &charClassMatcher{
pos: position{line: 1099, col: 6, offset: 32107},
val: "[:\\r\\n]",
chars: []rune{':', '\r', '\n'},
ignoreCase: false,
inverted: false,
},
},
&anyMatcher{
line: 1099, col: 15, offset: 32116,
},
},
},
},
},
},
},
},
{
name: "ErrStructField",
pos: position{line: 1105, col: 1, offset: 32227},
expr: &actionExpr{
pos: position{line: 1105, col: 18, offset: 32244},
run: (*parser).callonErrStructField1,
expr: &seqExpr{
pos: position{line: 1105, col: 18, offset: 32244},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1105, col: 18, offset: 32244},
run: (*parser).callonErrStructField3,
},
&oneOrMoreExpr{
pos: position{line: 1107, col: 3, offset: 32281},
expr: &seqExpr{
pos: position{line: 1107, col: 5, offset: 32283},
exprs: []any{
&notExpr{
pos: position{line: 1107, col: 5, offset: 32283},
expr: &choiceExpr{
pos: position{line: 1107, col: 7, offset: 32285},
alternatives: []any{
&ruleRefExpr{
pos: position{line: 1107, col: 7, offset: 32285},
name: "Field",
},
&seqExpr{
pos: position{line: 1107, col: 16, offset: 32294},
exprs: []any{
&ruleRefExpr{
pos: position{line: 1107, col: 16, offset: 32294},
name: "ReservedComments",
},
&litMatcher{
pos: position{line: 1107, col: 33, offset: 32311},
val: "}",
ignoreCase: false,
want: "\"}\"",
},
&zeroOrMoreExpr{
pos: position{line: 1107, col: 37, offset: 32315},
expr: &ruleRefExpr{
pos: position{line: 1107, col: 37, offset: 32315},
name: "Indent",
},
},
},
},
&ruleRefExpr{
pos: position{line: 1107, col: 48, offset: 32326},
name: "DefinitionStart",
},
},
},
},
&anyMatcher{
line: 1107, col: 66, offset: 32344,
},
},
},
},
},
},
},
},
{
name: "ErrStructIdentifier",
pos: position{line: 1111, col: 1, offset: 32406},
expr: &actionExpr{
pos: position{line: 1111, col: 23, offset: 32428},
run: (*parser).callonErrStructIdentifier1,
expr: &seqExpr{
pos: position{line: 1111, col: 23, offset: 32428},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1111, col: 23, offset: 32428},
run: (*parser).callonErrStructIdentifier3,
},
&zeroOrMoreExpr{
pos: position{line: 1113, col: 3, offset: 32470},
expr: &seqExpr{
pos: position{line: 1113, col: 5, offset: 32472},
exprs: []any{
&notExpr{
pos: position{line: 1113, col: 5, offset: 32472},
expr: &litMatcher{
pos: position{line: 1113, col: 6, offset: 32473},
val: "{",
ignoreCase: false,
want: "\"{\"",
},
},
&anyMatcher{
line: 1113, col: 10, offset: 32477,
},
},
},
},
},
},
},
},
{
name: "ErrStructRCUR",
pos: position{line: 1119, col: 1, offset: 32662},
expr: &actionExpr{
pos: position{line: 1119, col: 17, offset: 32678},
run: (*parser).callonErrStructRCUR1,
expr: &seqExpr{
pos: position{line: 1119, col: 17, offset: 32678},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1119, col: 17, offset: 32678},
run: (*parser).callonErrStructRCUR3,
},
&zeroOrMoreExpr{
pos: position{line: 1121, col: 3, offset: 32719},
expr: &seqExpr{
pos: position{line: 1121, col: 5, offset: 32721},
exprs: []any{
&notExpr{
pos: position{line: 1121, col: 5, offset: 32721},
expr: &ruleRefExpr{
pos: position{line: 1121, col: 6, offset: 32722},
name: "DefinitionStart",
},
},
&anyMatcher{
line: 1121, col: 22, offset: 32738,
},
},
},
},
},
},
},
},
{
name: "ErrStructLCUR",
pos: position{line: 1125, col: 1, offset: 32919},
expr: &actionExpr{
pos: position{line: 1125, col: 17, offset: 32935},
run: (*parser).callonErrStructLCUR1,
expr: &seqExpr{
pos: position{line: 1125, col: 17, offset: 32935},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1125, col: 17, offset: 32935},
run: (*parser).callonErrStructLCUR3,
},
&zeroOrMoreExpr{
pos: position{line: 1127, col: 3, offset: 32976},
expr: &seqExpr{
pos: position{line: 1127, col: 5, offset: 32978},
exprs: []any{
&notExpr{
pos: position{line: 1127, col: 5, offset: 32978},
expr: &litMatcher{
pos: position{line: 1127, col: 6, offset: 32979},
val: "}",
ignoreCase: false,
want: "\"}\"",
},
},
&anyMatcher{
line: 1127, col: 10, offset: 32983,
},
},
},
},
},
},
},
},
{
name: "ErrUnionIdentifier",
pos: position{line: 1133, col: 1, offset: 33112},
expr: &actionExpr{
pos: position{line: 1133, col: 22, offset: 33133},
run: (*parser).callonErrUnionIdentifier1,
expr: &seqExpr{
pos: position{line: 1133, col: 22, offset: 33133},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1133, col: 22, offset: 33133},
run: (*parser).callonErrUnionIdentifier3,
},
&zeroOrMoreExpr{
pos: position{line: 1135, col: 3, offset: 33174},
expr: &seqExpr{
pos: position{line: 1135, col: 5, offset: 33176},
exprs: []any{
&notExpr{
pos: position{line: 1135, col: 5, offset: 33176},
expr: &litMatcher{
pos: position{line: 1135, col: 6, offset: 33177},
val: "{",
ignoreCase: false,
want: "\"{\"",
},
},
&anyMatcher{
line: 1135, col: 10, offset: 33181,
},
},
},
},
},
},
},
},
{
name: "ErrUnionRCUR",
pos: position{line: 1141, col: 1, offset: 33326},
expr: &actionExpr{
pos: position{line: 1141, col: 16, offset: 33341},
run: (*parser).callonErrUnionRCUR1,
expr: &seqExpr{
pos: position{line: 1141, col: 16, offset: 33341},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1141, col: 16, offset: 33341},
run: (*parser).callonErrUnionRCUR3,
},
&zeroOrMoreExpr{
pos: position{line: 1143, col: 3, offset: 33381},
expr: &seqExpr{
pos: position{line: 1143, col: 5, offset: 33383},
exprs: []any{
&notExpr{
pos: position{line: 1143, col: 5, offset: 33383},
expr: &ruleRefExpr{
pos: position{line: 1143, col: 6, offset: 33384},
name: "DefinitionStart",
},
},
&anyMatcher{
line: 1143, col: 22, offset: 33400,
},
},
},
},
},
},
},
},
{
name: "ErrUnionLCUR",
pos: position{line: 1147, col: 1, offset: 33446},
expr: &actionExpr{
pos: position{line: 1147, col: 16, offset: 33461},
run: (*parser).callonErrUnionLCUR1,
expr: &seqExpr{
pos: position{line: 1147, col: 16, offset: 33461},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1147, col: 16, offset: 33461},
run: (*parser).callonErrUnionLCUR3,
},
&zeroOrMoreExpr{
pos: position{line: 1149, col: 3, offset: 33501},
expr: &seqExpr{
pos: position{line: 1149, col: 5, offset: 33503},
exprs: []any{
&notExpr{
pos: position{line: 1149, col: 5, offset: 33503},
expr: &litMatcher{
pos: position{line: 1149, col: 6, offset: 33504},
val: "}",
ignoreCase: false,
want: "\"}\"",
},
},
&anyMatcher{
line: 1149, col: 10, offset: 33508,
},
},
},
},
},
},
},
},
{
name: "ErrUnionField",
pos: position{line: 1153, col: 1, offset: 33607},
expr: &actionExpr{
pos: position{line: 1153, col: 17, offset: 33623},
run: (*parser).callonErrUnionField1,
expr: &seqExpr{
pos: position{line: 1153, col: 17, offset: 33623},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1153, col: 17, offset: 33623},
run: (*parser).callonErrUnionField3,
},
&oneOrMoreExpr{
pos: position{line: 1155, col: 3, offset: 33659},
expr: &seqExpr{
pos: position{line: 1155, col: 5, offset: 33661},
exprs: []any{
&notExpr{
pos: position{line: 1155, col: 5, offset: 33661},
expr: &choiceExpr{
pos: position{line: 1155, col: 7, offset: 33663},
alternatives: []any{
&ruleRefExpr{
pos: position{line: 1155, col: 7, offset: 33663},
name: "Field",
},
&seqExpr{
pos: position{line: 1155, col: 16, offset: 33672},
exprs: []any{
&ruleRefExpr{
pos: position{line: 1155, col: 16, offset: 33672},
name: "ReservedComments",
},
&litMatcher{
pos: position{line: 1155, col: 33, offset: 33689},
val: "}",
ignoreCase: false,
want: "\"}\"",
},
&zeroOrMoreExpr{
pos: position{line: 1155, col: 37, offset: 33693},
expr: &ruleRefExpr{
pos: position{line: 1155, col: 37, offset: 33693},
name: "Indent",
},
},
},
},
&ruleRefExpr{
pos: position{line: 1155, col: 48, offset: 33704},
name: "DefinitionStart",
},
},
},
},
&anyMatcher{
line: 1155, col: 66, offset: 33722,
},
},
},
},
},
},
},
},
{
name: "ErrExceptionIdentifier",
pos: position{line: 1161, col: 1, offset: 33798},
expr: &actionExpr{
pos: position{line: 1161, col: 26, offset: 33823},
run: (*parser).callonErrExceptionIdentifier1,
expr: &seqExpr{
pos: position{line: 1161, col: 26, offset: 33823},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1161, col: 26, offset: 33823},
run: (*parser).callonErrExceptionIdentifier3,
},
&zeroOrMoreExpr{
pos: position{line: 1163, col: 3, offset: 33868},
expr: &seqExpr{
pos: position{line: 1163, col: 5, offset: 33870},
exprs: []any{
&notExpr{
pos: position{line: 1163, col: 5, offset: 33870},
expr: &litMatcher{
pos: position{line: 1163, col: 6, offset: 33871},
val: "{",
ignoreCase: false,
want: "\"{\"",
},
},
&anyMatcher{
line: 1163, col: 10, offset: 33875,
},
},
},
},
},
},
},
},
{
name: "ErrExceptionRCUR",
pos: position{line: 1169, col: 1, offset: 34020},
expr: &actionExpr{
pos: position{line: 1169, col: 20, offset: 34039},
run: (*parser).callonErrExceptionRCUR1,
expr: &seqExpr{
pos: position{line: 1169, col: 20, offset: 34039},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1169, col: 20, offset: 34039},
run: (*parser).callonErrExceptionRCUR3,
},
&zeroOrMoreExpr{
pos: position{line: 1171, col: 3, offset: 34083},
expr: &seqExpr{
pos: position{line: 1171, col: 5, offset: 34085},
exprs: []any{
&notExpr{
pos: position{line: 1171, col: 5, offset: 34085},
expr: &ruleRefExpr{
pos: position{line: 1171, col: 6, offset: 34086},
name: "DefinitionStart",
},
},
&anyMatcher{
line: 1171, col: 22, offset: 34102,
},
},
},
},
},
},
},
},
{
name: "ErrExceptionLCUR",
pos: position{line: 1175, col: 1, offset: 34148},
expr: &actionExpr{
pos: position{line: 1175, col: 20, offset: 34167},
run: (*parser).callonErrExceptionLCUR1,
expr: &seqExpr{
pos: position{line: 1175, col: 20, offset: 34167},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1175, col: 20, offset: 34167},
run: (*parser).callonErrExceptionLCUR3,
},
&zeroOrMoreExpr{
pos: position{line: 1177, col: 3, offset: 34211},
expr: &seqExpr{
pos: position{line: 1177, col: 5, offset: 34213},
exprs: []any{
&notExpr{
pos: position{line: 1177, col: 5, offset: 34213},
expr: &litMatcher{
pos: position{line: 1177, col: 6, offset: 34214},
val: "}",
ignoreCase: false,
want: "\"}\"",
},
},
&anyMatcher{
line: 1177, col: 10, offset: 34218,
},
},
},
},
},
},
},
},
{
name: "ErrExceptionField",
pos: position{line: 1181, col: 1, offset: 34337},
expr: &actionExpr{
pos: position{line: 1181, col: 21, offset: 34357},
run: (*parser).callonErrExceptionField1,
expr: &seqExpr{
pos: position{line: 1181, col: 21, offset: 34357},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1181, col: 21, offset: 34357},
run: (*parser).callonErrExceptionField3,
},
&oneOrMoreExpr{
pos: position{line: 1183, col: 3, offset: 34397},
expr: &seqExpr{
pos: position{line: 1183, col: 5, offset: 34399},
exprs: []any{
&notExpr{
pos: position{line: 1183, col: 5, offset: 34399},
expr: &choiceExpr{
pos: position{line: 1183, col: 7, offset: 34401},
alternatives: []any{
&ruleRefExpr{
pos: position{line: 1183, col: 7, offset: 34401},
name: "Field",
},
&seqExpr{
pos: position{line: 1183, col: 16, offset: 34410},
exprs: []any{
&ruleRefExpr{
pos: position{line: 1183, col: 16, offset: 34410},
name: "ReservedComments",
},
&litMatcher{
pos: position{line: 1183, col: 33, offset: 34427},
val: "}",
ignoreCase: false,
want: "\"}\"",
},
&zeroOrMoreExpr{
pos: position{line: 1183, col: 37, offset: 34431},
expr: &ruleRefExpr{
pos: position{line: 1183, col: 37, offset: 34431},
name: "Indent",
},
},
},
},
&ruleRefExpr{
pos: position{line: 1183, col: 48, offset: 34442},
name: "DefinitionStart",
},
},
},
},
&anyMatcher{
line: 1183, col: 66, offset: 34460,
},
},
},
},
},
},
},
},
{
name: "ErrEnumIdentifier",
pos: position{line: 1189, col: 1, offset: 34531},
expr: &actionExpr{
pos: position{line: 1189, col: 21, offset: 34551},
run: (*parser).callonErrEnumIdentifier1,
expr: &seqExpr{
pos: position{line: 1189, col: 21, offset: 34551},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1189, col: 21, offset: 34551},
run: (*parser).callonErrEnumIdentifier3,
},
&zeroOrMoreExpr{
pos: position{line: 1191, col: 3, offset: 34591},
expr: &seqExpr{
pos: position{line: 1191, col: 5, offset: 34593},
exprs: []any{
&notExpr{
pos: position{line: 1191, col: 5, offset: 34593},
expr: &litMatcher{
pos: position{line: 1191, col: 6, offset: 34594},
val: "{",
ignoreCase: false,
want: "\"{\"",
},
},
&anyMatcher{
line: 1191, col: 10, offset: 34598,
},
},
},
},
},
},
},
},
{
name: "ErrEnumRCUR",
pos: position{line: 1197, col: 1, offset: 34748},
expr: &actionExpr{
pos: position{line: 1197, col: 15, offset: 34762},
run: (*parser).callonErrEnumRCUR1,
expr: &seqExpr{
pos: position{line: 1197, col: 15, offset: 34762},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1197, col: 15, offset: 34762},
run: (*parser).callonErrEnumRCUR3,
},
&zeroOrMoreExpr{
pos: position{line: 1199, col: 3, offset: 34801},
expr: &seqExpr{
pos: position{line: 1199, col: 5, offset: 34803},
exprs: []any{
&notExpr{
pos: position{line: 1199, col: 5, offset: 34803},
expr: &ruleRefExpr{
pos: position{line: 1199, col: 6, offset: 34804},
name: "DefinitionStart",
},
},
&anyMatcher{
line: 1199, col: 22, offset: 34820,
},
},
},
},
},
},
},
},
{
name: "ErrEnumLCUR",
pos: position{line: 1203, col: 1, offset: 34866},
expr: &actionExpr{
pos: position{line: 1203, col: 15, offset: 34880},
run: (*parser).callonErrEnumLCUR1,
expr: &seqExpr{
pos: position{line: 1203, col: 15, offset: 34880},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1203, col: 15, offset: 34880},
run: (*parser).callonErrEnumLCUR3,
},
&zeroOrMoreExpr{
pos: position{line: 1205, col: 3, offset: 34919},
expr: &seqExpr{
pos: position{line: 1205, col: 5, offset: 34921},
exprs: []any{
&notExpr{
pos: position{line: 1205, col: 5, offset: 34921},
expr: &litMatcher{
pos: position{line: 1205, col: 6, offset: 34922},
val: "}",
ignoreCase: false,
want: "\"}\"",
},
},
&anyMatcher{
line: 1205, col: 10, offset: 34926,
},
},
},
},
},
},
},
},
{
name: "ErrEnumValue",
pos: position{line: 1209, col: 1, offset: 35045},
expr: &actionExpr{
pos: position{line: 1209, col: 16, offset: 35060},
run: (*parser).callonErrEnumValue1,
expr: &seqExpr{
pos: position{line: 1209, col: 16, offset: 35060},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1209, col: 16, offset: 35060},
run: (*parser).callonErrEnumValue3,
},
&zeroOrMoreExpr{
pos: position{line: 1211, col: 3, offset: 35095},
expr: &seqExpr{
pos: position{line: 1211, col: 5, offset: 35097},
exprs: []any{
&notExpr{
pos: position{line: 1211, col: 5, offset: 35097},
expr: &choiceExpr{
pos: position{line: 1211, col: 7, offset: 35099},
alternatives: []any{
&ruleRefExpr{
pos: position{line: 1211, col: 7, offset: 35099},
name: "Field",
},
&seqExpr{
pos: position{line: 1211, col: 16, offset: 35108},
exprs: []any{
&ruleRefExpr{
pos: position{line: 1211, col: 16, offset: 35108},
name: "ReservedComments",
},
&litMatcher{
pos: position{line: 1211, col: 33, offset: 35125},
val: "}",
ignoreCase: false,
want: "\"}\"",
},
&zeroOrMoreExpr{
pos: position{line: 1211, col: 37, offset: 35129},
expr: &ruleRefExpr{
pos: position{line: 1211, col: 37, offset: 35129},
name: "Indent",
},
},
},
},
&ruleRefExpr{
pos: position{line: 1211, col: 48, offset: 35140},
name: "DefinitionStart",
},
},
},
},
&anyMatcher{
line: 1211, col: 66, offset: 35158,
},
},
},
},
},
},
},
},
{
name: "ErrEnumValueIntConstant",
pos: position{line: 1215, col: 1, offset: 35224},
expr: &actionExpr{
pos: position{line: 1215, col: 27, offset: 35250},
run: (*parser).callonErrEnumValueIntConstant1,
expr: &seqExpr{
pos: position{line: 1215, col: 27, offset: 35250},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1215, col: 27, offset: 35250},
run: (*parser).callonErrEnumValueIntConstant3,
},
&zeroOrMoreExpr{
pos: position{line: 1217, col: 3, offset: 35296},
expr: &seqExpr{
pos: position{line: 1217, col: 5, offset: 35298},
exprs: []any{
&notExpr{
pos: position{line: 1217, col: 5, offset: 35298},
expr: &ruleRefExpr{
pos: position{line: 1217, col: 7, offset: 35300},
name: "CarriageReturnLineFeed",
},
},
&anyMatcher{
line: 1217, col: 32, offset: 35325,
},
},
},
},
},
},
},
},
{
name: "ErrTypedefIdentifier",
pos: position{line: 1223, col: 1, offset: 35408},
expr: &actionExpr{
pos: position{line: 1223, col: 24, offset: 35431},
run: (*parser).callonErrTypedefIdentifier1,
expr: &seqExpr{
pos: position{line: 1223, col: 24, offset: 35431},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1223, col: 24, offset: 35431},
run: (*parser).callonErrTypedefIdentifier3,
},
&zeroOrMoreExpr{
pos: position{line: 1225, col: 3, offset: 35474},
expr: &seqExpr{
pos: position{line: 1225, col: 5, offset: 35476},
exprs: []any{
&notExpr{
pos: position{line: 1225, col: 5, offset: 35476},
expr: &ruleRefExpr{
pos: position{line: 1225, col: 7, offset: 35478},
name: "CarriageReturnLineFeed",
},
},
&anyMatcher{
line: 1225, col: 31, offset: 35502,
},
},
},
},
},
},
},
},
{
name: "ErrConstIdentifier",
pos: position{line: 1232, col: 1, offset: 35586},
expr: &actionExpr{
pos: position{line: 1232, col: 22, offset: 35607},
run: (*parser).callonErrConstIdentifier1,
expr: &seqExpr{
pos: position{line: 1232, col: 22, offset: 35607},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1232, col: 22, offset: 35607},
run: (*parser).callonErrConstIdentifier3,
},
&zeroOrMoreExpr{
pos: position{line: 1234, col: 3, offset: 35648},
expr: &seqExpr{
pos: position{line: 1234, col: 4, offset: 35649},
exprs: []any{
&notExpr{
pos: position{line: 1234, col: 4, offset: 35649},
expr: &ruleRefExpr{
pos: position{line: 1234, col: 5, offset: 35650},
name: "EQUAL",
},
},
&anyMatcher{
line: 1234, col: 11, offset: 35656,
},
},
},
},
},
},
},
},
{
name: "ErrConstMissingValue",
pos: position{line: 1240, col: 1, offset: 35731},
expr: &actionExpr{
pos: position{line: 1240, col: 24, offset: 35754},
run: (*parser).callonErrConstMissingValue1,
expr: &seqExpr{
pos: position{line: 1240, col: 24, offset: 35754},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1240, col: 24, offset: 35754},
run: (*parser).callonErrConstMissingValue3,
},
&zeroOrMoreExpr{
pos: position{line: 1242, col: 3, offset: 35797},
expr: &seqExpr{
pos: position{line: 1242, col: 4, offset: 35798},
exprs: []any{
&notExpr{
pos: position{line: 1242, col: 4, offset: 35798},
expr: &charClassMatcher{
pos: position{line: 1242, col: 5, offset: 35799},
val: "[\\r\\n]",
chars: []rune{'\r', '\n'},
ignoreCase: false,
inverted: false,
},
},
&anyMatcher{
line: 1242, col: 12, offset: 35806,
},
},
},
},
},
},
},
},
{
name: "ErrConstConstValue",
pos: position{line: 1246, col: 1, offset: 35872},
expr: &actionExpr{
pos: position{line: 1246, col: 22, offset: 35893},
run: (*parser).callonErrConstConstValue1,
expr: &seqExpr{
pos: position{line: 1246, col: 22, offset: 35893},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1246, col: 22, offset: 35893},
run: (*parser).callonErrConstConstValue3,
},
&zeroOrMoreExpr{
pos: position{line: 1248, col: 3, offset: 35934},
expr: &seqExpr{
pos: position{line: 1248, col: 4, offset: 35935},
exprs: []any{
&notExpr{
pos: position{line: 1248, col: 4, offset: 35935},
expr: &charClassMatcher{
pos: position{line: 1248, col: 5, offset: 35936},
val: "[\\r\\n]",
chars: []rune{'\r', '\n'},
ignoreCase: false,
inverted: false,
},
},
&anyMatcher{
line: 1248, col: 12, offset: 35943,
},
},
},
},
},
},
},
},
{
name: "ErrServiceIdentifier",
pos: position{line: 1254, col: 1, offset: 36021},
expr: &actionExpr{
pos: position{line: 1254, col: 24, offset: 36044},
run: (*parser).callonErrServiceIdentifier1,
expr: &seqExpr{
pos: position{line: 1254, col: 24, offset: 36044},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1254, col: 24, offset: 36044},
run: (*parser).callonErrServiceIdentifier3,
},
&zeroOrMoreExpr{
pos: position{line: 1256, col: 3, offset: 36087},
expr: &seqExpr{
pos: position{line: 1256, col: 5, offset: 36089},
exprs: []any{
&notExpr{
pos: position{line: 1256, col: 5, offset: 36089},
expr: &litMatcher{
pos: position{line: 1256, col: 6, offset: 36090},
val: "{",
ignoreCase: false,
want: "\"{\"",
},
},
&anyMatcher{
line: 1256, col: 10, offset: 36094,
},
},
},
},
},
},
},
},
{
name: "ErrServiceRCUR",
pos: position{line: 1262, col: 1, offset: 36239},
expr: &actionExpr{
pos: position{line: 1262, col: 18, offset: 36256},
run: (*parser).callonErrServiceRCUR1,
expr: &seqExpr{
pos: position{line: 1262, col: 18, offset: 36256},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1262, col: 18, offset: 36256},
run: (*parser).callonErrServiceRCUR3,
},
&zeroOrMoreExpr{
pos: position{line: 1264, col: 3, offset: 36298},
expr: &seqExpr{
pos: position{line: 1264, col: 5, offset: 36300},
exprs: []any{
&notExpr{
pos: position{line: 1264, col: 5, offset: 36300},
expr: &ruleRefExpr{
pos: position{line: 1264, col: 6, offset: 36301},
name: "DefinitionStart",
},
},
&anyMatcher{
line: 1264, col: 22, offset: 36317,
},
},
},
},
},
},
},
},
{
name: "ErrServiceFunction",
pos: position{line: 1268, col: 1, offset: 36363},
expr: &actionExpr{
pos: position{line: 1268, col: 23, offset: 36385},
run: (*parser).callonErrServiceFunction1,
expr: &seqExpr{
pos: position{line: 1268, col: 23, offset: 36385},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1268, col: 23, offset: 36385},
run: (*parser).callonErrServiceFunction3,
},
&zeroOrMoreExpr{
pos: position{line: 1270, col: 3, offset: 36426},
expr: &seqExpr{
pos: position{line: 1270, col: 5, offset: 36428},
exprs: []any{
&notExpr{
pos: position{line: 1270, col: 5, offset: 36428},
expr: &charClassMatcher{
pos: position{line: 1270, col: 6, offset: 36429},
val: "[\\r\\n]",
chars: []rune{'\r', '\n'},
ignoreCase: false,
inverted: false,
},
},
&anyMatcher{
line: 1270, col: 13, offset: 36436,
},
},
},
},
},
},
},
},
{
name: "ErrFunctionIdentifier",
pos: position{line: 1275, col: 1, offset: 36512},
expr: &actionExpr{
pos: position{line: 1275, col: 25, offset: 36536},
run: (*parser).callonErrFunctionIdentifier1,
expr: &seqExpr{
pos: position{line: 1275, col: 25, offset: 36536},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1275, col: 25, offset: 36536},
run: (*parser).callonErrFunctionIdentifier3,
},
&zeroOrMoreExpr{
pos: position{line: 1277, col: 3, offset: 36580},
expr: &seqExpr{
pos: position{line: 1277, col: 5, offset: 36582},
exprs: []any{
&notExpr{
pos: position{line: 1277, col: 5, offset: 36582},
expr: &litMatcher{
pos: position{line: 1277, col: 6, offset: 36583},
val: "(",
ignoreCase: false,
want: "\"(\"",
},
},
&anyMatcher{
line: 1277, col: 10, offset: 36587,
},
},
},
},
},
},
},
},
{
name: "ErrFunctionArgument",
pos: position{line: 1283, col: 1, offset: 36765},
expr: &actionExpr{
pos: position{line: 1283, col: 23, offset: 36787},
run: (*parser).callonErrFunctionArgument1,
expr: &seqExpr{
pos: position{line: 1283, col: 23, offset: 36787},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1283, col: 23, offset: 36787},
run: (*parser).callonErrFunctionArgument3,
},
&zeroOrMoreExpr{
pos: position{line: 1285, col: 3, offset: 36829},
expr: &seqExpr{
pos: position{line: 1285, col: 5, offset: 36831},
exprs: []any{
&notExpr{
pos: position{line: 1285, col: 5, offset: 36831},
expr: &charClassMatcher{
pos: position{line: 1285, col: 6, offset: 36832},
val: "[,;)\\r\\n]",
chars: []rune{',', ';', ')', '\r', '\n'},
ignoreCase: false,
inverted: false,
},
},
&anyMatcher{
line: 1285, col: 16, offset: 36842,
},
},
},
},
},
},
},
},
{
name: "ErrLiteral1MissingRight",
pos: position{line: 1292, col: 1, offset: 36922},
expr: &actionExpr{
pos: position{line: 1292, col: 27, offset: 36948},
run: (*parser).callonErrLiteral1MissingRight1,
expr: &seqExpr{
pos: position{line: 1292, col: 27, offset: 36948},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1292, col: 27, offset: 36948},
run: (*parser).callonErrLiteral1MissingRight3,
},
&zeroOrMoreExpr{
pos: position{line: 1294, col: 3, offset: 36994},
expr: &seqExpr{
pos: position{line: 1294, col: 5, offset: 36996},
exprs: []any{
&notExpr{
pos: position{line: 1294, col: 5, offset: 36996},
expr: &charClassMatcher{
pos: position{line: 1294, col: 6, offset: 36997},
val: "[\\r\\n]",
chars: []rune{'\r', '\n'},
ignoreCase: false,
inverted: false,
},
},
&anyMatcher{
line: 1294, col: 13, offset: 37004,
},
},
},
},
},
},
},
},
{
name: "ErrLiteral1",
pos: position{line: 1298, col: 1, offset: 37107},
expr: &actionExpr{
pos: position{line: 1298, col: 15, offset: 37121},
run: (*parser).callonErrLiteral11,
expr: &seqExpr{
pos: position{line: 1298, col: 15, offset: 37121},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1298, col: 15, offset: 37121},
run: (*parser).callonErrLiteral13,
},
&zeroOrMoreExpr{
pos: position{line: 1300, col: 3, offset: 37155},
expr: &seqExpr{
pos: position{line: 1300, col: 5, offset: 37157},
exprs: []any{
&notExpr{
pos: position{line: 1300, col: 5, offset: 37157},
expr: &charClassMatcher{
pos: position{line: 1300, col: 6, offset: 37158},
val: "[\\r\\n]",
chars: []rune{'\r', '\n'},
ignoreCase: false,
inverted: false,
},
},
&anyMatcher{
line: 1300, col: 13, offset: 37165,
},
},
},
},
},
},
},
},
{
name: "ErrLiteral2MissingRight",
pos: position{line: 1304, col: 1, offset: 37268},
expr: &actionExpr{
pos: position{line: 1304, col: 27, offset: 37294},
run: (*parser).callonErrLiteral2MissingRight1,
expr: &seqExpr{
pos: position{line: 1304, col: 27, offset: 37294},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1304, col: 27, offset: 37294},
run: (*parser).callonErrLiteral2MissingRight3,
},
&zeroOrMoreExpr{
pos: position{line: 1306, col: 3, offset: 37340},
expr: &seqExpr{
pos: position{line: 1306, col: 5, offset: 37342},
exprs: []any{
&notExpr{
pos: position{line: 1306, col: 5, offset: 37342},
expr: &charClassMatcher{
pos: position{line: 1306, col: 6, offset: 37343},
val: "[\\r\\n]",
chars: []rune{'\r', '\n'},
ignoreCase: false,
inverted: false,
},
},
&anyMatcher{
line: 1306, col: 13, offset: 37350,
},
},
},
},
},
},
},
},
{
name: "ErrLiteral2",
pos: position{line: 1310, col: 1, offset: 37453},
expr: &actionExpr{
pos: position{line: 1310, col: 15, offset: 37467},
run: (*parser).callonErrLiteral21,
expr: &seqExpr{
pos: position{line: 1310, col: 15, offset: 37467},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1310, col: 15, offset: 37467},
run: (*parser).callonErrLiteral23,
},
&zeroOrMoreExpr{
pos: position{line: 1312, col: 3, offset: 37501},
expr: &seqExpr{
pos: position{line: 1312, col: 5, offset: 37503},
exprs: []any{
&notExpr{
pos: position{line: 1312, col: 5, offset: 37503},
expr: &charClassMatcher{
pos: position{line: 1312, col: 6, offset: 37504},
val: "[\\r\\n]",
chars: []rune{'\r', '\n'},
ignoreCase: false,
inverted: false,
},
},
&anyMatcher{
line: 1312, col: 13, offset: 37511,
},
},
},
},
},
},
},
},
{
name: "ErrConst",
pos: position{line: 1316, col: 1, offset: 37614},
expr: &actionExpr{
pos: position{line: 1316, col: 12, offset: 37625},
run: (*parser).callonErrConst1,
expr: &seqExpr{
pos: position{line: 1316, col: 12, offset: 37625},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1316, col: 12, offset: 37625},
run: (*parser).callonErrConst3,
},
&zeroOrMoreExpr{
pos: position{line: 1318, col: 3, offset: 37656},
expr: &seqExpr{
pos: position{line: 1318, col: 4, offset: 37657},
exprs: []any{
&notExpr{
pos: position{line: 1318, col: 4, offset: 37657},
expr: &charClassMatcher{
pos: position{line: 1318, col: 5, offset: 37658},
val: "[\\r\\n]",
chars: []rune{'\r', '\n'},
ignoreCase: false,
inverted: false,
},
},
&anyMatcher{
line: 1318, col: 12, offset: 37665,
},
},
},
},
},
},
},
},
{
name: "ErrTypedef",
pos: position{line: 1322, col: 1, offset: 37766},
expr: &actionExpr{
pos: position{line: 1322, col: 14, offset: 37779},
run: (*parser).callonErrTypedef1,
expr: &seqExpr{
pos: position{line: 1322, col: 14, offset: 37779},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1322, col: 14, offset: 37779},
run: (*parser).callonErrTypedef3,
},
&zeroOrMoreExpr{
pos: position{line: 1324, col: 3, offset: 37812},
expr: &seqExpr{
pos: position{line: 1324, col: 4, offset: 37813},
exprs: []any{
&notExpr{
pos: position{line: 1324, col: 4, offset: 37813},
expr: &charClassMatcher{
pos: position{line: 1324, col: 5, offset: 37814},
val: "[\\r\\n]",
chars: []rune{'\r', '\n'},
ignoreCase: false,
inverted: false,
},
},
&anyMatcher{
line: 1324, col: 12, offset: 37821,
},
},
},
},
},
},
},
},
{
name: "ErrEnum",
pos: position{line: 1328, col: 1, offset: 37925},
expr: &actionExpr{
pos: position{line: 1328, col: 11, offset: 37935},
run: (*parser).callonErrEnum1,
expr: &seqExpr{
pos: position{line: 1328, col: 11, offset: 37935},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1328, col: 11, offset: 37935},
run: (*parser).callonErrEnum3,
},
&zeroOrMoreExpr{
pos: position{line: 1330, col: 3, offset: 37965},
expr: &seqExpr{
pos: position{line: 1330, col: 4, offset: 37966},
exprs: []any{
&notExpr{
pos: position{line: 1330, col: 4, offset: 37966},
expr: &charClassMatcher{
pos: position{line: 1330, col: 5, offset: 37967},
val: "[\\r\\n]",
chars: []rune{'\r', '\n'},
ignoreCase: false,
inverted: false,
},
},
&anyMatcher{
line: 1330, col: 12, offset: 37974,
},
},
},
},
},
},
},
},
{
name: "ErrService",
pos: position{line: 1334, col: 1, offset: 38075},
expr: &actionExpr{
pos: position{line: 1334, col: 14, offset: 38088},
run: (*parser).callonErrService1,
expr: &seqExpr{
pos: position{line: 1334, col: 14, offset: 38088},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1334, col: 14, offset: 38088},
run: (*parser).callonErrService3,
},
&zeroOrMoreExpr{
pos: position{line: 1336, col: 3, offset: 38121},
expr: &seqExpr{
pos: position{line: 1336, col: 4, offset: 38122},
exprs: []any{
&notExpr{
pos: position{line: 1336, col: 4, offset: 38122},
expr: &charClassMatcher{
pos: position{line: 1336, col: 5, offset: 38123},
val: "[\\r\\n]",
chars: []rune{'\r', '\n'},
ignoreCase: false,
inverted: false,
},
},
&anyMatcher{
line: 1336, col: 12, offset: 38130,
},
},
},
},
},
},
},
},
{
name: "ErrStruct",
pos: position{line: 1340, col: 1, offset: 38234},
expr: &actionExpr{
pos: position{line: 1340, col: 13, offset: 38246},
run: (*parser).callonErrStruct1,
expr: &seqExpr{
pos: position{line: 1340, col: 13, offset: 38246},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1340, col: 13, offset: 38246},
run: (*parser).callonErrStruct3,
},
&zeroOrMoreExpr{
pos: position{line: 1342, col: 3, offset: 38278},
expr: &seqExpr{
pos: position{line: 1342, col: 4, offset: 38279},
exprs: []any{
&notExpr{
pos: position{line: 1342, col: 4, offset: 38279},
expr: &charClassMatcher{
pos: position{line: 1342, col: 5, offset: 38280},
val: "[\\r\\n]",
chars: []rune{'\r', '\n'},
ignoreCase: false,
inverted: false,
},
},
&anyMatcher{
line: 1342, col: 12, offset: 38287,
},
},
},
},
},
},
},
},
{
name: "ErrUnion",
pos: position{line: 1346, col: 1, offset: 38390},
expr: &actionExpr{
pos: position{line: 1346, col: 12, offset: 38401},
run: (*parser).callonErrUnion1,
expr: &seqExpr{
pos: position{line: 1346, col: 12, offset: 38401},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1346, col: 12, offset: 38401},
run: (*parser).callonErrUnion3,
},
&zeroOrMoreExpr{
pos: position{line: 1348, col: 3, offset: 38432},
expr: &seqExpr{
pos: position{line: 1348, col: 4, offset: 38433},
exprs: []any{
&notExpr{
pos: position{line: 1348, col: 4, offset: 38433},
expr: &charClassMatcher{
pos: position{line: 1348, col: 5, offset: 38434},
val: "[\\r\\n]",
chars: []rune{'\r', '\n'},
ignoreCase: false,
inverted: false,
},
},
&anyMatcher{
line: 1348, col: 12, offset: 38441,
},
},
},
},
},
},
},
},
{
name: "ErrException",
pos: position{line: 1352, col: 1, offset: 38543},
expr: &actionExpr{
pos: position{line: 1352, col: 16, offset: 38558},
run: (*parser).callonErrException1,
expr: &seqExpr{
pos: position{line: 1352, col: 16, offset: 38558},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1352, col: 16, offset: 38558},
run: (*parser).callonErrException3,
},
&zeroOrMoreExpr{
pos: position{line: 1354, col: 3, offset: 38593},
expr: &seqExpr{
pos: position{line: 1354, col: 4, offset: 38594},
exprs: []any{
&notExpr{
pos: position{line: 1354, col: 4, offset: 38594},
expr: &charClassMatcher{
pos: position{line: 1354, col: 5, offset: 38595},
val: "[\\r\\n]",
chars: []rune{'\r', '\n'},
ignoreCase: false,
inverted: false,
},
},
&anyMatcher{
line: 1354, col: 12, offset: 38602,
},
},
},
},
},
},
},
},
{
name: "ErrDefinition",
pos: position{line: 1358, col: 1, offset: 38708},
expr: &actionExpr{
pos: position{line: 1358, col: 17, offset: 38724},
run: (*parser).callonErrDefinition1,
expr: &seqExpr{
pos: position{line: 1358, col: 17, offset: 38724},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1358, col: 17, offset: 38724},
run: (*parser).callonErrDefinition3,
},
&zeroOrMoreExpr{
pos: position{line: 1360, col: 3, offset: 38760},
expr: &seqExpr{
pos: position{line: 1360, col: 4, offset: 38761},
exprs: []any{
&notExpr{
pos: position{line: 1360, col: 4, offset: 38761},
expr: &charClassMatcher{
pos: position{line: 1360, col: 5, offset: 38762},
val: "[\\r\\n]",
chars: []rune{'\r', '\n'},
ignoreCase: false,
inverted: false,
},
},
&anyMatcher{
line: 1360, col: 12, offset: 38769,
},
},
},
},
},
},
},
},
{
name: "ErrInclude",
pos: position{line: 1364, col: 1, offset: 38875},
expr: &actionExpr{
pos: position{line: 1364, col: 14, offset: 38888},
run: (*parser).callonErrInclude1,
expr: &seqExpr{
pos: position{line: 1364, col: 14, offset: 38888},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1364, col: 14, offset: 38888},
run: (*parser).callonErrInclude3,
},
&zeroOrMoreExpr{
pos: position{line: 1366, col: 3, offset: 38921},
expr: &seqExpr{
pos: position{line: 1366, col: 4, offset: 38922},
exprs: []any{
&notExpr{
pos: position{line: 1366, col: 4, offset: 38922},
expr: &charClassMatcher{
pos: position{line: 1366, col: 5, offset: 38923},
val: "[\\r\\n]",
chars: []rune{'\r', '\n'},
ignoreCase: false,
inverted: false,
},
},
&anyMatcher{
line: 1366, col: 12, offset: 38930,
},
},
},
},
},
},
},
},
{
name: "ErrCppInclude",
pos: position{line: 1370, col: 1, offset: 39033},
expr: &actionExpr{
pos: position{line: 1370, col: 17, offset: 39049},
run: (*parser).callonErrCppInclude1,
expr: &seqExpr{
pos: position{line: 1370, col: 17, offset: 39049},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1370, col: 17, offset: 39049},
run: (*parser).callonErrCppInclude3,
},
&zeroOrMoreExpr{
pos: position{line: 1372, col: 3, offset: 39085},
expr: &seqExpr{
pos: position{line: 1372, col: 4, offset: 39086},
exprs: []any{
&notExpr{
pos: position{line: 1372, col: 4, offset: 39086},
expr: &charClassMatcher{
pos: position{line: 1372, col: 5, offset: 39087},
val: "[\\r\\n]",
chars: []rune{'\r', '\n'},
ignoreCase: false,
inverted: false,
},
},
&anyMatcher{
line: 1372, col: 12, offset: 39094,
},
},
},
},
},
},
},
},
{
name: "ErrNamespace",
pos: position{line: 1376, col: 1, offset: 39200},
expr: &actionExpr{
pos: position{line: 1376, col: 16, offset: 39215},
run: (*parser).callonErrNamespace1,
expr: &seqExpr{
pos: position{line: 1376, col: 16, offset: 39215},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1376, col: 16, offset: 39215},
run: (*parser).callonErrNamespace3,
},
&zeroOrMoreExpr{
pos: position{line: 1378, col: 3, offset: 39250},
expr: &seqExpr{
pos: position{line: 1378, col: 4, offset: 39251},
exprs: []any{
&notExpr{
pos: position{line: 1378, col: 4, offset: 39251},
expr: &charClassMatcher{
pos: position{line: 1378, col: 5, offset: 39252},
val: "[\\r\\n]",
chars: []rune{'\r', '\n'},
ignoreCase: false,
inverted: false,
},
},
&anyMatcher{
line: 1378, col: 12, offset: 39259,
},
},
},
},
},
},
},
},
{
name: "ErrHeader",
pos: position{line: 1382, col: 1, offset: 39364},
expr: &actionExpr{
pos: position{line: 1382, col: 13, offset: 39376},
run: (*parser).callonErrHeader1,
expr: &seqExpr{
pos: position{line: 1382, col: 13, offset: 39376},
exprs: []any{
&stateCodeExpr{
pos: position{line: 1382, col: 13, offset: 39376},
run: (*parser).callonErrHeader3,
},
&zeroOrMoreExpr{
pos: position{line: 1384, col: 3, offset: 39408},
expr: &seqExpr{
pos: position{line: 1384, col: 4, offset: 39409},
exprs: []any{
&notExpr{
pos: position{line: 1384, col: 4, offset: 39409},
expr: &charClassMatcher{
pos: position{line: 1384, col: 5, offset: 39410},
val: "[\\r\\n]",
chars: []rune{'\r', '\n'},
ignoreCase: false,
inverted: false,
},
},
&anyMatcher{
line: 1384, col: 12, offset: 39417,
},
},
},
},
},
},
},
},
},
}
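// The functions below are the grammar action callbacks: every onX method
// receives that rule's labeled sub-matches and builds the corresponding AST
// node, while the matching callonX wrapper pulls those labeled values off the
// parser's value stack before delegating to onX. onDocument3, for example,
// assembles the top-level Document node from the parsed headers, definitions,
// and trailing comments.
//
// A minimal usage sketch, assuming the standard pigeon entry points are
// generated alongside this table (the input below is hypothetical):
//
//	result, err := Parse("demo.thrift", []byte("struct Foo { 1: string name }"))
//	if err != nil {
//		// handle parse (or recovered) errors
//	}
//	doc, _ := result.(*Document)
//	_ = doc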
func (c *current) onDocument3(headers, defs, comments any) (any, error) {
return NewDocument(toHeaderSlice(headers), toDefinitionSlice(defs), comments.([]*Comment), NewLocationFromCurrent(c)), nil
}
func (p *parser) callonDocument3() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onDocument3(stack["headers"], stack["defs"], stack["comments"])
}
func (c *current) onHeader5(comments, v, endLineComments any) (any, error) {
c.globalStore["parse"] = "header"
v.(Header).SetComments(comments.([]*Comment), endLineComments.([]*Comment))
return v, nil
}
func (p *parser) callonHeader5() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onHeader5(stack["comments"], stack["v"], stack["endLineComments"])
}
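// onHeader25 is a semantic predicate: header error recovery is attempted only
// while the parser is still in the header section, i.e. before any definition
// has set globalStore["parse"] to "definition".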
func (c *current) onHeader25() (bool, error) {
/* fmt.Println("header predict:", c.pos, "text:", string(c.text)) */
if state, ok := c.globalStore["parse"]; !ok || ok && state.(string) == "header" {
return true, nil
}
return false, nil
}
func (p *parser) callonHeader25() (bool, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onHeader25()
}
func (c *current) onHeader16(x any) (any, error) {
/* fmt.Println("header return:", c.pos, "text:", string(c.text)) */
badHeader := x.([]any)[4].(*BadHeader)
return badHeader, nil
}
func (p *parser) callonHeader16() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onHeader16(stack["x"])
}
func (c *current) onInclude2(includeKeyword, include any) (any, error) {
includeV, ok := include.(*Literal)
if !ok {
includeV = include.([]interface{})[0].(*Literal)
}
return NewInclude(includeKeyword.(*IncludeKeyword), includeV, NewLocationFromCurrent(c)), nil
}
func (p *parser) callonInclude2() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onInclude2(stack["includeKeyword"], stack["include"])
}
func (c *current) onInclude8(x any) (any, error) {
return x.([]any)[1], nil
}
func (p *parser) callonInclude8() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onInclude8(stack["x"])
}
func (c *current) onCppInclude2(cppIncludeKeyword, include any) (any, error) {
includeV, ok := include.(*Literal)
if !ok {
includeV = include.([]interface{})[0].(*Literal)
}
return NewCPPInclude(cppIncludeKeyword.(*CPPIncludeKeyword), includeV, NewLocationFromCurrent(c)), nil
}
func (p *parser) callonCppInclude2() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onCppInclude2(stack["cppIncludeKeyword"], stack["include"])
}
func (c *current) onCppInclude8(x any) (any, error) {
return x.([]any)[1], nil
}
func (p *parser) callonCppInclude8() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onCppInclude8(stack["x"])
}
func (c *current) onNamespace2(namespaceKeyword, language, name, annotations any) (any, error) {
return NewNamespace(namespaceKeyword.(*NamespaceKeyword), language.(*NamespaceScope), name.(*Identifier), toAnnotations(annotations), NewLocationFromCurrent(c)), nil
}
func (p *parser) callonNamespace2() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onNamespace2(stack["namespaceKeyword"], stack["language"], stack["name"], stack["annotations"])
}
func (c *current) onNamespace13(x any) (any, error) {
return x.([]any)[1], nil
}
func (p *parser) callonNamespace13() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onNamespace13(stack["x"])
}
func (c *current) onNamespaceScope1(v any) (any, error) {
id := v.(*Identifier)
res := &NamespaceScope{
Identifier: *id,
}
return res, nil
}
func (p *parser) callonNamespaceScope1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onNamespaceScope1(stack["v"])
}
func (c *current) onNamespaceScopeAny1(comments, idName any) (any, error) {
return NewIdentifier(idName.(*IdentifierName), comments.([]*Comment), NewLocationFromCurrent(c)), nil
}
func (p *parser) callonNamespaceScopeAny1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onNamespaceScopeAny1(stack["comments"], stack["idName"])
}
func (c *current) onNamespaceScopeAnyToken1() (any, error) {
return NewIdentifierName("*", NewLocationFromCurrent(c)), nil
}
func (p *parser) callonNamespaceScopeAnyToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onNamespaceScopeAnyToken1()
}
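// onDefinition9 marks the parser as having entered the definition section and
// attaches the leading comments, end-of-line comments, annotations, and source
// location to the parsed definition node.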
func (c *current) onDefinition9(comments, v, annos, endLineComments any) (any, error) {
c.globalStore["parse"] = "definition"
def := v.(Definition)
def.SetComments(comments.([]*Comment), endLineComments.([]*Comment))
def.SetAnnotations(toAnnotations(annos))
def.SetLocation(NewLocationFromCurrent(c))
return def, nil
}
func (p *parser) callonDefinition9() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onDefinition9(stack["comments"], stack["v"], stack["annos"], stack["endLineComments"])
}
func (c *current) onDefinition34() (bool, error) {
/* fmt.Println("definition predict:", c.pos, "text:", string(c.text)) */
if state, ok := c.globalStore["parse"]; ok && state.(string) == "definition" {
return true, nil
}
return false, nil
}
func (p *parser) callonDefinition34() (bool, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onDefinition34()
}
func (c *current) onDefinition27(x any) (any, error) {
/* fmt.Println("definition return:", c.pos, "text:", string(c.text)) */
return x.([]any)[3], nil
}
func (p *parser) callonDefinition27() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onDefinition27(stack["x"])
}
func (c *current) onConst5(constKeyword, t, name, v, sep any) (any, error) {
equalAndValue := v.([]any)
return NewConst(constKeyword.(*ConstKeyword), equalAndValue[0].(*EqualKeyword), toListSeparatorKeyword(sep), name.(*Identifier), t.(*FieldType), equalAndValue[1].(*ConstValue), NewLocationFromCurrent(c)), nil
}
func (p *parser) callonConst5() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onConst5(stack["constKeyword"], stack["t"], stack["name"], stack["v"], stack["sep"])
}
func (c *current) onConst18(x any) (any, error) {
return x.([]any)[1], nil
}
func (p *parser) callonConst18() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onConst18(stack["x"])
}
func (c *current) onConstEqualValue2(v any) (any, error) {
return v, nil
}
func (p *parser) callonConstEqualValue2() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onConstEqualValue2(stack["v"])
}
func (c *current) onConstEqualValue7(x any) (any, error) {
return []any{NewBadEqualKeyword(), x.([]any)[1]}, nil
}
func (p *parser) callonConstEqualValue7() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onConstEqualValue7(stack["x"])
}
func (c *current) onConstEqualValue13(x any) (any, error) {
return x, nil
}
func (p *parser) callonConstEqualValue13() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onConstEqualValue13(stack["x"])
}
func (c *current) onTypedef3(typedefKeyword, t, alias any) (any, error) {
return NewTypedef(typedefKeyword.(*TypedefKeyword), t.(*FieldType), alias.(*Identifier), NewLocationFromCurrent(c)), nil
}
func (p *parser) callonTypedef3() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onTypedef3(stack["typedefKeyword"], stack["t"], stack["alias"])
}
func (c *current) onTypedef11(x any) (any, error) {
return x.([]any)[1], nil
}
func (p *parser) callonTypedef11() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onTypedef11(stack["x"])
}
func (c *current) onEnum5(enum, name, lcur, v, rcur any) (any, error) {
values := toEnumValueSlice(v)
return NewEnum(enum.(*EnumKeyword), lcur.(*LCurKeyword), rcur.(*RCurKeyword), name.(*Identifier), values, NewLocationFromCurrent(c)), nil
}
func (p *parser) callonEnum5() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onEnum5(stack["enum"], stack["name"], stack["lcur"], stack["v"], stack["rcur"])
}
func (c *current) onEnum18(x any) (any, error) {
return x.([]any)[1], nil
}
func (p *parser) callonEnum18() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onEnum18(stack["x"])
}
func (c *current) onEnumValueLine1(comments, v, endLineComments any) (any, error) {
v.(*EnumValue).SetComments(comments.([]*Comment), endLineComments.([]*Comment))
return v, nil
}
func (p *parser) callonEnumValueLine1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onEnumValueLine1(stack["comments"], stack["v"], stack["endLineComments"])
}
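// onEnumValue2 handles the optional "= <int>" part of an enum value: when
// present it yields an EqualKeyword and a ConstValue whose integer value is
// recorded; otherwise the numeric value defaults to -1.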
func (c *current) onEnumValue2(name, value, annos, sep any) (any, error) {
var valueNode *ConstValue
var equalNode *EqualKeyword
intV := int64(-1)
if value != nil {
//fmt.Println(value.(Node).Type(), value)
equalNode = value.([]any)[0].(*EqualKeyword)
valueNode = value.([]any)[1].(*ConstValue)
intV = valueNode.Value.(int64)
}
return NewEnumValue(toListSeparatorKeyword(sep), equalNode, name.(*Identifier), valueNode, intV, toAnnotations(annos), NewLocationFromCurrent(c)), nil
}
func (p *parser) callonEnumValue2() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onEnumValue2(stack["name"], stack["value"], stack["annos"], stack["sep"])
}
func (c *current) onService5(svc, name, extends, lcur, fns, rcur any) (any, error) {
var extendsVal *Identifier
var extendsKeyword *ExtendsKeyword
if extends != nil {
extendsKeyword = extends.([]any)[0].(*ExtendsKeyword)
extendsVal = extends.([]any)[1].(*Identifier)
}
fnsVal := toFunctionSlice(fns)
return NewService(svc.(*ServiceKeyword), extendsKeyword, lcur.(*LCurKeyword), rcur.(*RCurKeyword), name.(*Identifier), extendsVal, fnsVal, NewLocationFromCurrent(c)), nil
}
func (p *parser) callonService5() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onService5(stack["svc"], stack["name"], stack["extends"], stack["lcur"], stack["fns"], stack["rcur"])
}
func (c *current) onService23(x any) (any, error) {
return x.([]any)[1], nil
}
func (p *parser) callonService23() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onService23(stack["x"])
}
func (c *current) onStruct5(st, id, lcur, fields, rcur any) (any, error) {
return NewStruct(st.(*StructKeyword), lcur.(*LCurKeyword), rcur.(*RCurKeyword), id.(*Identifier), toFieldSlice(fields), NewLocationFromCurrent(c)), nil
}
func (p *parser) callonStruct5() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onStruct5(stack["st"], stack["id"], stack["lcur"], stack["fields"], stack["rcur"])
}
func (c *current) onStruct18(x any) (any, error) {
return x.([]any)[1], nil
}
func (p *parser) callonStruct18() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onStruct18(stack["x"])
}
func (c *current) onUnion5(union, name, lcur, fields, rcur any) (any, error) {
return NewUnion(union.(*UnionKeyword), lcur.(*LCurKeyword), rcur.(*RCurKeyword), name.(*Identifier), toFieldSlice(fields), NewLocationFromCurrent(c)), nil
}
func (p *parser) callonUnion5() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onUnion5(stack["union"], stack["name"], stack["lcur"], stack["fields"], stack["rcur"])
}
func (c *current) onUnion18(x any) (any, error) {
return x.([]any)[1], nil
}
func (p *parser) callonUnion18() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onUnion18(stack["x"])
}
func (c *current) onException5(excep, name, lcur, fields, rcur any) (any, error) {
return NewException(excep.(*ExceptionKeyword), lcur.(*LCurKeyword), rcur.(*RCurKeyword), name.(*Identifier), toFieldSlice(fields), NewLocationFromCurrent(c)), nil
}
func (p *parser) callonException5() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onException5(stack["excep"], stack["name"], stack["lcur"], stack["fields"], stack["rcur"])
}
func (c *current) onException18(x any) (any, error) {
return x.([]any)[1], nil
}
func (p *parser) callonException18() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onException18(stack["x"])
}
func (c *current) onFieldWithThrow3(x any) (any, error) {
return x.([]any)[2], nil
}
func (p *parser) callonFieldWithThrow3() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onFieldWithThrow3(stack["x"])
}
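// onField1 assembles a field from its parts: the field index, optional
// requiredness, type, name, optional default value ("= const"), annotations,
// list separator, and surrounding comments.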
func (c *current) onField1(comments, index, required, fieldType, id, value, annos, sep, lineComments any) (any, error) {
var constV *ConstValue
var equalKeyword *EqualKeyword
if value != nil {
equalKeyword = value.([]any)[0].(*EqualKeyword)
constV = value.([]any)[1].(*ConstValue)
}
var requiredV *RequiredKeyword
if required != nil {
requiredV = required.(*RequiredKeyword)
}
return NewField(equalKeyword, toListSeparatorKeyword(sep), comments.([]*Comment), lineComments.([]*Comment), toAnnotations(annos), index.(*FieldIndex), requiredV, fieldType.(*FieldType), id.(*Identifier), constV, NewLocationFromCurrent(c)), nil
}
func (p *parser) callonField1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onField1(stack["comments"], stack["index"], stack["required"], stack["fieldType"], stack["id"], stack["value"], stack["annos"], stack["sep"], stack["lineComments"])
}
func (c *current) onFieldId2(comments, i, colon any) (any, error) {
fieldIndex := i.(*FieldIndex)
return NewFieldIndex(colon.(*ColonKeyword), fieldIndex.Value, comments.([]*Comment), fieldIndex.Location), nil
}
func (p *parser) callonFieldId2() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onFieldId2(stack["comments"], stack["i"], stack["colon"])
}
func (c *current) onFieldReq1(comments, r any) (any, error) {
kw := NewKeyword(comments.([]*Comment), r.(*KeywordLiteral), NewLocationFromCurrent(c))
return &RequiredKeyword{Keyword: kw}, nil
}
func (p *parser) callonFieldReq1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onFieldReq1(stack["comments"], stack["r"])
}
func (c *current) onIsRequired1(v any) (any, error) {
return v, nil
}
func (p *parser) callonIsRequired1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onIsRequired1(stack["v"])
}
func (c *current) onRequiredToken1() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonRequiredToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onRequiredToken1()
}
func (c *current) onOptionalToken1() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonOptionalToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onOptionalToken1()
}
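// onFunction4 builds a service function. A "void" return is carried as a
// VoidKeyword, any other return type as a FieldType; the oneway modifier and
// the throws clause are optional and left nil when absent.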
func (c *current) onFunction4(comments, oneway, ft, name, lpar, args, rpar, throws, annos, sep, endLineComments any) (any, error) {
var ftype *FieldType
var voidKeyword *VoidKeyword
if _, ok := ft.(*VoidKeyword); ok {
voidKeyword = ft.(*VoidKeyword)
} else {
ftype = ft.(*FieldType)
}
var throwsV *Throws
if throws != nil {
throwsV = throws.(*Throws)
}
var onewayKeyword *OnewayKeyword
if oneway != nil {
onewayKeyword = oneway.(*OnewayKeyword)
}
return NewFunction(lpar.(*LParKeyword), rpar.(*RParKeyword), toListSeparatorKeyword(sep), name.(*Identifier), onewayKeyword, voidKeyword, ftype, toFieldSlice(args), throwsV, comments.([]*Comment), endLineComments.([]*Comment), toAnnotations(annos), NewLocationFromCurrent(c)), nil
}
func (p *parser) callonFunction4() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onFunction4(stack["comments"], stack["oneway"], stack["ft"], stack["name"], stack["lpar"], stack["args"], stack["rpar"], stack["throws"], stack["annos"], stack["sep"], stack["endLineComments"])
}
func (c *current) onFunction33(x any) (any, error) {
return x.([]any)[2], nil
}
func (p *parser) callonFunction33() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onFunction33(stack["x"])
}
func (c *current) onFunctionFieldWithThrow2(v any) (any, error) {
return v, nil
}
func (p *parser) callonFunctionFieldWithThrow2() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onFunctionFieldWithThrow2(stack["v"])
}
func (c *current) onFunctionFieldWithThrow5(x any) (any, error) {
return x.([]any)[2], nil
}
func (p *parser) callonFunctionFieldWithThrow5() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onFunctionFieldWithThrow5(stack["x"])
}
func (c *current) onThrows1(throws, lpar, fields, rpar any) (any, error) {
return NewThrows(throws.(*ThrowsKeyword), lpar.(*LParKeyword), rpar.(*RParKeyword), toFieldSlice(fields), NewLocationFromCurrent(c)), nil
}
func (p *parser) callonThrows1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onThrows1(stack["throws"], stack["lpar"], stack["fields"], stack["rpar"])
}
func (c *current) onFieldType1(v, annos any) (any, error) {
ft := v.(*FieldType)
ft.Annotations = toAnnotations(annos)
return ft, nil
}
func (p *parser) callonFieldType1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onFieldType1(stack["v"], stack["annos"])
}
func (c *current) onIdentifierType1(v any) (any, error) {
return v.(*Identifier).ToFieldType(), nil
}
func (p *parser) callonIdentifierType1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onIdentifierType1(stack["v"])
}
func (c *current) onBaseType1(v any) (any, error) {
return NewFieldType(nil, nil, nil, nil, v.(*TypeName), nil, nil, NewLocationFromCurrent(c)), nil
}
func (p *parser) callonBaseType1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onBaseType1(stack["v"])
}
func (c *current) onContainerType1(v any) (any, error) {
return v.(*FieldType), nil
}
func (p *parser) callonContainerType1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onContainerType1(stack["v"])
}
func (c *current) onMapType1(t, cpp, lp, key, comma, value, rp any) (any, error) {
var cppType *CppType
if cpp != nil {
cppType = cpp.(*CppType)
}
return NewFieldType(lp.(*LPointKeyword), rp.(*RPointKeyword), comma.(*CommaKeyword), cppType, t.(*TypeName), key.(*FieldType), value.(*FieldType), NewLocationFromCurrent(c)), nil
}
func (p *parser) callonMapType1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onMapType1(stack["t"], stack["cpp"], stack["lp"], stack["key"], stack["comma"], stack["value"], stack["rp"])
}
func (c *current) onSetType1(t, cpp, lp, key, rp any) (any, error) {
var cppType *CppType
if cpp != nil {
cppType = cpp.(*CppType)
}
return NewFieldType(lp.(*LPointKeyword), rp.(*RPointKeyword), nil, cppType, t.(*TypeName), key.(*FieldType), nil, NewLocationFromCurrent(c)), nil
}
func (p *parser) callonSetType1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onSetType1(stack["t"], stack["cpp"], stack["lp"], stack["key"], stack["rp"])
}
func (c *current) onListType1(t, lp, key, rp, cpp any) (any, error) {
var cppType *CppType
if cpp != nil {
cppType = cpp.(*CppType)
}
return NewFieldType(lp.(*LPointKeyword), rp.(*RPointKeyword), nil, cppType, t.(*TypeName), key.(*FieldType), nil, NewLocationFromCurrent(c)), nil
}
func (p *parser) callonListType1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onListType1(stack["t"], stack["lp"], stack["key"], stack["rp"], stack["cpp"])
}
func (c *current) onCppType1(cpp, l any) (any, error) {
return NewCppType(cpp.(*CppTypeKeyword), l.(*Literal), NewLocationFromCurrent(c)), nil
}
func (p *parser) callonCppType1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onCppType1(stack["cpp"], stack["l"])
}
func (c *current) onConstValue1(v any) (any, error) {
if literal, ok := v.(*Literal); ok {
return NewConstValue("string", literal, NewLocationFromCurrent(c)), nil
}
return v.(*ConstValue), nil
}
func (p *parser) callonConstValue1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onConstValue1(stack["v"])
}
func (c *current) onIdentifierConst1(comments, cv any) (any, error) {
node := cv.(*ConstValue)
node.SetComments(comments.([]*Comment))
return node, nil
}
func (p *parser) callonIdentifierConst1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onIdentifierConst1(stack["comments"], stack["cv"])
}
func (c *current) onIdentifierConstValue1(id any) (any, error) {
cv := NewConstValue("identifier", id.(*Identifier).Name.Text, NewLocationFromCurrent(c))
return cv, nil
}
func (p *parser) callonIdentifierConstValue1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onIdentifierConstValue1(stack["id"])
}
func (c *current) onEnumValueIntConstant2(v any) (any, error) {
return v, nil
}
func (p *parser) callonEnumValueIntConstant2() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onEnumValueIntConstant2(stack["v"])
}
func (c *current) onEnumValueIntConstant7(x any) (any, error) {
return []any{x.([]any)[0], x.([]any)[2]}, nil
}
func (p *parser) callonEnumValueIntConstant7() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onEnumValueIntConstant7(stack["x"])
}
func (c *current) onIntConstant2(comments, v any) (any, error) {
cv := v.(*ConstValue)
cv.SetComments(comments.([]*Comment))
return cv, nil
}
func (p *parser) callonIntConstant2() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onIntConstant2(stack["comments"], stack["v"])
}
func (c *current) onIntConstant15(x any) (any, error) {
return x.([]any)[2], nil
}
func (p *parser) callonIntConstant15() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onIntConstant15(stack["x"])
}
func (c *current) onHexIntConstant1() (any, error) {
v, err := strconv.ParseInt(strings.TrimPrefix(string(c.text), "0x"), 16, 64)
if err != nil {
return nil, err
}
cv := NewConstValue("i64", v, NewLocationFromCurrent(c))
cv.ValueInText = string(c.text)
return cv, nil
}
func (p *parser) callonHexIntConstant1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onHexIntConstant1()
}
func (c *current) onOctIntConstant1() (any, error) {
v, err := strconv.ParseInt(strings.TrimPrefix(string(c.text), "0o"), 8, 64)
if err != nil {
return nil, err
}
cv := NewConstValue("i64", v, NewLocationFromCurrent(c))
cv.ValueInText = string(c.text)
return cv, nil
}
func (p *parser) callonOctIntConstant1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onOctIntConstant1()
}
func (c *current) onNormalIntConstant1() (any, error) {
v, err := strconv.ParseInt(string(c.text), 10, 64)
if err != nil {
return nil, err
}
cv := NewConstValue("i64", v, NewLocationFromCurrent(c))
cv.ValueInText = string(c.text)
return cv, nil
}
func (p *parser) callonNormalIntConstant1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onNormalIntConstant1()
}
func (c *current) onFieldIndex2() (any, error) {
v, err := strconv.ParseInt(string(c.text), 10, 64)
if err != nil {
return nil, err
}
return NewFieldIndex(nil, int(v), nil, NewLocationFromCurrent(c)), nil
}
func (p *parser) callonFieldIndex2() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onFieldIndex2()
}
func (c *current) onFieldIndex5(x any) (any, error) {
return x.([]any)[2], nil
}
func (p *parser) callonFieldIndex5() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onFieldIndex5(stack["x"])
}
func (c *current) onDoubleConstant1(comments, v any) (any, error) {
cv := v.(*ConstValue)
cv.SetComments(comments.([]*Comment))
return cv, nil
}
func (p *parser) callonDoubleConstant1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onDoubleConstant1(stack["comments"], stack["v"])
}
func (c *current) onDoubleConstantValue1() (any, error) {
v, err := strconv.ParseFloat(string(c.text), 64)
if err != nil {
return nil, err
}
cv := NewConstValue("double", v, NewLocationFromCurrent(c))
cv.ValueInText = string(c.text)
return cv, nil
}
func (p *parser) callonDoubleConstantValue1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onDoubleConstantValue1()
}
func (c *current) onAnnotations1(lpar, annos, rpar any) (any, error) {
return NewAnnotations(lpar.(*LParKeyword), rpar.(*RParKeyword), toAnnotationSlice(annos), NewLocationFromCurrent(c)), nil
}
func (p *parser) callonAnnotations1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onAnnotations1(stack["lpar"], stack["annos"], stack["rpar"])
}
func (c *current) onAnnotation1(id, eq, value, sep any) (any, error) {
return NewAnnotation(eq.(*EqualKeyword), toListSeparatorKeyword(sep), id.(*Identifier), value.(*Literal), NewLocationFromCurrent(c)), nil
}
func (p *parser) callonAnnotation1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onAnnotation1(stack["id"], stack["eq"], stack["value"], stack["sep"])
}
func (c *current) onConstList1(lbrk, v, rbrk any) (any, error) {
cv := NewConstValue("list", toConstValueSlice(v), NewLocationFromCurrent(c))
cv.LBrkKeyword = lbrk.(*LBrkKeyword)
cv.RBrkKeyword = rbrk.(*RBrkKeyword)
return cv, nil
}
func (p *parser) callonConstList1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onConstList1(stack["lbrk"], stack["v"], stack["rbrk"])
}
func (c *current) onConstListItem1(v, sep any) (any, error) {
cv := v.(*ConstValue)
if sep != nil {
cv.ListSeparatorKeyword = sep.(*ListSeparatorKeyword)
}
return cv, nil
}
func (p *parser) callonConstListItem1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onConstListItem1(stack["v"], stack["sep"])
}
func (c *current) onConstMap1(lcur, v, rcur any) (any, error) {
cv := NewConstValue("map", toConstValueSlice(v), NewLocationFromCurrent(c))
cv.LCurKeyword = lcur.(*LCurKeyword)
cv.RCurKeyword = rcur.(*RCurKeyword)
return cv, nil
}
func (p *parser) callonConstMap1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onConstMap1(stack["lcur"], stack["v"], stack["rcur"])
}
func (c *current) onConstMapItem1(key, colon, value, sep any) (any, error) {
cv := NewMapConstValue(key.(*ConstValue), value.(*ConstValue), NewLocationFromCurrent(c))
cv.ColonKeyword = colon.(*ColonKeyword)
if sep != nil {
cv.ListSeparatorKeyword = sep.(*ListSeparatorKeyword)
}
return cv, nil
}
func (p *parser) callonConstMapItem1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onConstMapItem1(stack["key"], stack["colon"], stack["value"], stack["sep"])
}
func (c *current) onEscapeLiteralChar1() (any, error) {
return string(c.text), nil
}
func (p *parser) callonEscapeLiteralChar1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onEscapeLiteralChar1()
}
func (c *current) onLiteral5(l any) (any, error) {
return l, nil
}
func (p *parser) callonLiteral5() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onLiteral5(stack["l"])
}
func (c *current) onLiteral12(comments, t any) (any, error) {
return NewLiteral(comments.([]*Comment), t.(*LiteralValue), "\"", NewLocationFromCurrent(c)), nil
}
func (p *parser) callonLiteral12() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onLiteral12(stack["comments"], stack["t"])
}
func (c *current) onLiteral112(x any) (any, error) {
return x.([]any)[1], nil
}
func (p *parser) callonLiteral112() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onLiteral112(stack["x"])
}
func (c *current) onLiteral22(comments, t any) (any, error) {
return NewLiteral(comments.([]*Comment), t.(*LiteralValue), "'", NewLocationFromCurrent(c)), nil
}
func (p *parser) callonLiteral22() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onLiteral22(stack["comments"], stack["t"])
}
func (c *current) onLiteral212(x any) (any, error) {
return x.([]any)[1], nil
}
func (p *parser) callonLiteral212() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onLiteral212(stack["x"])
}
func (c *current) onLiteral1Val1() (any, error) {
return NewLiteralValue(string(c.text), NewLocationFromCurrent(c)), nil
}
func (p *parser) callonLiteral1Val1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onLiteral1Val1()
}
func (c *current) onLiteral2Val1() (any, error) {
return NewLiteralValue(string(c.text), NewLocationFromCurrent(c)), nil
}
func (p *parser) callonLiteral2Val1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onLiteral2Val1()
}
func (c *current) onDefinitionIdentifier2(id any) (any, error) {
return id.(*Identifier), nil
}
func (p *parser) callonDefinitionIdentifier2() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onDefinitionIdentifier2(stack["id"])
}
func (c *current) onIdentifier1(comments, id any) (any, error) {
idName := id.(*IdentifierName)
return NewIdentifier(idName, comments.([]*Comment), NewLocationFromCurrent(c)), nil
}
func (p *parser) callonIdentifier1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onIdentifier1(stack["comments"], stack["id"])
}
func (c *current) onIdentifierToken1() (any, error) {
return NewIdentifierName(string(c.text), NewLocationFromCurrent(c)), nil
}
func (p *parser) callonIdentifierToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onIdentifierToken1()
}
func (c *current) onListSeparator1(comments, t any) (any, error) {
kw := NewKeyword(comments.([]*Comment), t.(*KeywordLiteral), NewLocationFromCurrent(c))
return &ListSeparatorKeyword{Keyword: kw}, nil
}
func (p *parser) callonListSeparator1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onListSeparator1(stack["comments"], stack["t"])
}
func (c *current) onListSeparatorToken1() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonListSeparatorToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onListSeparatorToken1()
}
func (c *current) onLetter4() (any, error) {
return string(c.text), nil
}
func (p *parser) callonLetter4() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onLetter4()
}
func (c *current) onLetterOrDigit5() (any, error) {
return string(c.text), nil
}
func (p *parser) callonLetterOrDigit5() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onLetterOrDigit5()
}
func (c *current) onDigit1() (any, error) {
return string(c.text), nil
}
func (p *parser) callonDigit1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onDigit1()
}
func (c *current) onReservedComments1(comments any) (any, error) {
return toCommentSlice(comments), nil
}
func (p *parser) callonReservedComments1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onReservedComments1(stack["comments"])
}
func (c *current) onReservedEndLineComments1(comments any) (any, error) {
return toCommentSlice(comments), nil
}
func (p *parser) callonReservedEndLineComments1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onReservedEndLineComments1(stack["comments"])
}
func (c *current) onSpace1() (any, error) {
return "", nil
}
func (p *parser) callonSpace1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onSpace1()
}
func (c *current) onIndent1() (any, error) {
return "", nil
}
func (p *parser) callonIndent1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onIndent1()
}
func (c *current) onComment1(v any) (any, error) {
return v.(*Comment), nil
}
func (p *parser) callonComment1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onComment1(stack["v"])
}
func (c *current) onLongComment1() (any, error) {
return NewComment(string(c.text), CommentStyleMultiLine, NewLocationFromCurrent(c)), nil
}
func (p *parser) callonLongComment1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onLongComment1()
}
func (c *current) onLongCommentMatch1() (any, error) {
return string(c.text), nil
}
func (p *parser) callonLongCommentMatch1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onLongCommentMatch1()
}
func (c *current) onLineComment1() (any, error) {
return NewComment(string(c.text), CommentStyleSingleLine, NewLocationFromCurrent(c)), nil
}
func (p *parser) callonLineComment1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onLineComment1()
}
func (c *current) onLineCommentMatch1() (any, error) {
return string(c.text), nil
}
func (p *parser) callonLineCommentMatch1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onLineCommentMatch1()
}
func (c *current) onUnixComment1() (any, error) {
return NewComment(string(c.text), CommentStyleShell, NewLocationFromCurrent(c)), nil
}
func (p *parser) callonUnixComment1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onUnixComment1()
}
func (c *current) onUnixCommentMatch1() (any, error) {
return string(c.text), nil
}
func (p *parser) callonUnixCommentMatch1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onUnixCommentMatch1()
}
func (c *current) onBOOL1(comments, t any) (any, error) {
tn := t.(*TypeName)
tn.Comments = comments.([]*Comment)
return tn, nil
}
func (p *parser) callonBOOL1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onBOOL1(stack["comments"], stack["t"])
}
func (c *current) onBOOLToken1() (any, error) {
return NewTypeName(string(c.text), c.pos), nil
}
func (p *parser) callonBOOLToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onBOOLToken1()
}
func (c *current) onBYTE1(comments, t any) (any, error) {
tn := t.(*TypeName)
tn.Comments = comments.([]*Comment)
return tn, nil
}
func (p *parser) callonBYTE1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onBYTE1(stack["comments"], stack["t"])
}
func (c *current) onBYTEToken1() (any, error) {
return NewTypeName(string(c.text), c.pos), nil
}
func (p *parser) callonBYTEToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onBYTEToken1()
}
func (c *current) onI81(comments, t any) (any, error) {
tn := t.(*TypeName)
tn.Comments = comments.([]*Comment)
return tn, nil
}
func (p *parser) callonI81() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onI81(stack["comments"], stack["t"])
}
func (c *current) onI8Token1() (any, error) {
return NewTypeName(string(c.text), c.pos), nil
}
func (p *parser) callonI8Token1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onI8Token1()
}
func (c *current) onI161(comments, t any) (any, error) {
tn := t.(*TypeName)
tn.Comments = comments.([]*Comment)
return tn, nil
}
func (p *parser) callonI161() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onI161(stack["comments"], stack["t"])
}
func (c *current) onI16Token1() (any, error) {
return NewTypeName(string(c.text), c.pos), nil
}
func (p *parser) callonI16Token1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onI16Token1()
}
func (c *current) onI321(comments, t any) (any, error) {
tn := t.(*TypeName)
tn.Comments = comments.([]*Comment)
return tn, nil
}
func (p *parser) callonI321() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onI321(stack["comments"], stack["t"])
}
func (c *current) onI32Token1() (any, error) {
return NewTypeName(string(c.text), c.pos), nil
}
func (p *parser) callonI32Token1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onI32Token1()
}
func (c *current) onI641(comments, t any) (any, error) {
tn := t.(*TypeName)
tn.Comments = comments.([]*Comment)
return tn, nil
}
func (p *parser) callonI641() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onI641(stack["comments"], stack["t"])
}
func (c *current) onI64Token1() (any, error) {
return NewTypeName(string(c.text), c.pos), nil
}
func (p *parser) callonI64Token1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onI64Token1()
}
func (c *current) onDOUBLE1(comments, t any) (any, error) {
tn := t.(*TypeName)
tn.Comments = comments.([]*Comment)
return tn, nil
}
func (p *parser) callonDOUBLE1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onDOUBLE1(stack["comments"], stack["t"])
}
func (c *current) onDOUBLEToken1() (any, error) {
return NewTypeName(string(c.text), c.pos), nil
}
func (p *parser) callonDOUBLEToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onDOUBLEToken1()
}
func (c *current) onSTRING1(comments, t any) (any, error) {
tn := t.(*TypeName)
tn.Comments = comments.([]*Comment)
return tn, nil
}
func (p *parser) callonSTRING1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onSTRING1(stack["comments"], stack["t"])
}
func (c *current) onSTRINGToken1() (any, error) {
return NewTypeName(string(c.text), c.pos), nil
}
func (p *parser) callonSTRINGToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onSTRINGToken1()
}
func (c *current) onBINARY1(comments, t any) (any, error) {
tn := t.(*TypeName)
tn.Comments = comments.([]*Comment)
return tn, nil
}
func (p *parser) callonBINARY1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onBINARY1(stack["comments"], stack["t"])
}
func (c *current) onBINARYToken1() (any, error) {
return NewTypeName(string(c.text), c.pos), nil
}
func (p *parser) callonBINARYToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onBINARYToken1()
}
func (c *current) onUUID1(comments, t any) (any, error) {
tn := t.(*TypeName)
tn.Comments = comments.([]*Comment)
return tn, nil
}
func (p *parser) callonUUID1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onUUID1(stack["comments"], stack["t"])
}
func (c *current) onUUIDToken1() (any, error) {
return NewTypeName(string(c.text), c.pos), nil
}
func (p *parser) callonUUIDToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onUUIDToken1()
}
func (c *current) onMAP1(comments, t any) (any, error) {
tn := t.(*TypeName)
tn.Comments = comments.([]*Comment)
return tn, nil
}
func (p *parser) callonMAP1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onMAP1(stack["comments"], stack["t"])
}
func (c *current) onMAPToken1() (any, error) {
return NewTypeName(string(c.text), c.pos), nil
}
func (p *parser) callonMAPToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onMAPToken1()
}
func (c *current) onSET1(comments, t any) (any, error) {
tn := t.(*TypeName)
tn.Comments = comments.([]*Comment)
return tn, nil
}
func (p *parser) callonSET1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onSET1(stack["comments"], stack["t"])
}
func (c *current) onSETToken1() (any, error) {
return NewTypeName(string(c.text), c.pos), nil
}
func (p *parser) callonSETToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onSETToken1()
}
func (c *current) onLIST1(comments, t any) (any, error) {
tn := t.(*TypeName)
tn.Comments = comments.([]*Comment)
return tn, nil
}
func (p *parser) callonLIST1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onLIST1(stack["comments"], stack["t"])
}
func (c *current) onListToken1() (any, error) {
return NewTypeName(string(c.text), c.pos), nil
}
func (p *parser) callonListToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onListToken1()
}
func (c *current) onCONST1(comments, t any) (any, error) {
kw := NewKeyword(comments.([]*Comment), t.(*KeywordLiteral), NewLocationFromCurrent(c))
return &ConstKeyword{Keyword: kw}, nil
}
func (p *parser) callonCONST1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onCONST1(stack["comments"], stack["t"])
}
func (c *current) onCONSTToken1() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonCONSTToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onCONSTToken1()
}
func (c *current) onONEWAY1(comments, t any) (any, error) {
kw := NewKeyword(comments.([]*Comment), t.(*KeywordLiteral), NewLocationFromCurrent(c))
return &OnewayKeyword{Keyword: kw}, nil
}
func (p *parser) callonONEWAY1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onONEWAY1(stack["comments"], stack["t"])
}
func (c *current) onONEWAYToken1() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonONEWAYToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onONEWAYToken1()
}
func (c *current) onTYPEDEF1(comments, t any) (any, error) {
kw := NewKeyword(comments.([]*Comment), t.(*KeywordLiteral), NewLocationFromCurrent(c))
return &TypedefKeyword{Keyword: kw}, nil
}
func (p *parser) callonTYPEDEF1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onTYPEDEF1(stack["comments"], stack["t"])
}
func (c *current) onTYPEDEFToken1() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonTYPEDEFToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onTYPEDEFToken1()
}
func (c *current) onVOID1(comments, t any) (any, error) {
kw := NewKeyword(comments.([]*Comment), t.(*KeywordLiteral), NewLocationFromCurrent(c))
return &VoidKeyword{Keyword: kw}, nil
}
func (p *parser) callonVOID1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onVOID1(stack["comments"], stack["t"])
}
func (c *current) onVOIDToken1() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonVOIDToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onVOIDToken1()
}
func (c *current) onTHROWS1(comments, t any) (any, error) {
kw := NewKeyword(comments.([]*Comment), t.(*KeywordLiteral), NewLocationFromCurrent(c))
return &ThrowsKeyword{Keyword: kw}, nil
}
func (p *parser) callonTHROWS1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onTHROWS1(stack["comments"], stack["t"])
}
func (c *current) onTHROWSToken1() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonTHROWSToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onTHROWSToken1()
}
func (c *current) onEXCEPTION1(comments, t any) (any, error) {
kw := NewKeyword(comments.([]*Comment), t.(*KeywordLiteral), NewLocationFromCurrent(c))
return &ExceptionKeyword{Keyword: kw}, nil
}
func (p *parser) callonEXCEPTION1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onEXCEPTION1(stack["comments"], stack["t"])
}
func (c *current) onEXCEPTIONToken1() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonEXCEPTIONToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onEXCEPTIONToken1()
}
func (c *current) onEXTENDS1(comments, t any) (any, error) {
kw := NewKeyword(comments.([]*Comment), t.(*KeywordLiteral), NewLocationFromCurrent(c))
return &ExtendsKeyword{Keyword: kw}, nil
}
func (p *parser) callonEXTENDS1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onEXTENDS1(stack["comments"], stack["t"])
}
func (c *current) onEXTENDSToken1() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonEXTENDSToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onEXTENDSToken1()
}
func (c *current) onSERVICE1(comments, t any) (any, error) {
kw := NewKeyword(comments.([]*Comment), t.(*KeywordLiteral), NewLocationFromCurrent(c))
return &ServiceKeyword{Keyword: kw}, nil
}
func (p *parser) callonSERVICE1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onSERVICE1(stack["comments"], stack["t"])
}
func (c *current) onSERVICEToken1() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonSERVICEToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onSERVICEToken1()
}
func (c *current) onSTRUCT1(comments, t any) (any, error) {
kw := NewKeyword(comments.([]*Comment), t.(*KeywordLiteral), NewLocationFromCurrent(c))
return &StructKeyword{Keyword: kw}, nil
}
func (p *parser) callonSTRUCT1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onSTRUCT1(stack["comments"], stack["t"])
}
func (c *current) onSTRUCTToken1() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonSTRUCTToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onSTRUCTToken1()
}
func (c *current) onUNION1(comments, t any) (any, error) {
kw := NewKeyword(comments.([]*Comment), t.(*KeywordLiteral), NewLocationFromCurrent(c))
return &UnionKeyword{Keyword: kw}, nil
}
func (p *parser) callonUNION1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onUNION1(stack["comments"], stack["t"])
}
func (c *current) onUNIONToken1() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonUNIONToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onUNIONToken1()
}
func (c *current) onENUM1(comments, t any) (any, error) {
kw := NewKeyword(comments.([]*Comment), t.(*KeywordLiteral), NewLocationFromCurrent(c))
return &EnumKeyword{Keyword: kw}, nil
}
func (p *parser) callonENUM1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onENUM1(stack["comments"], stack["t"])
}
func (c *current) onENUMToken1() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonENUMToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onENUMToken1()
}
func (c *current) onINCLUDE1(comments, t any) (any, error) {
kw := NewKeyword(comments.([]*Comment), t.(*KeywordLiteral), NewLocationFromCurrent(c))
return &IncludeKeyword{Keyword: kw}, nil
}
func (p *parser) callonINCLUDE1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onINCLUDE1(stack["comments"], stack["t"])
}
func (c *current) onINCLUDEToken1() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonINCLUDEToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onINCLUDEToken1()
}
func (c *current) onCPPINCLUDE1(comments, t any) (any, error) {
kw := NewKeyword(comments.([]*Comment), t.(*KeywordLiteral), NewLocationFromCurrent(c))
return &CPPIncludeKeyword{Keyword: kw}, nil
}
func (p *parser) callonCPPINCLUDE1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onCPPINCLUDE1(stack["comments"], stack["t"])
}
func (c *current) onCPPINCLUDEToken1() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonCPPINCLUDEToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onCPPINCLUDEToken1()
}
func (c *current) onNAMESPACE1(comments, t any) (any, error) {
kw := NewKeyword(comments.([]*Comment), t.(*KeywordLiteral), NewLocationFromCurrent(c))
return &NamespaceKeyword{Keyword: kw}, nil
}
func (p *parser) callonNAMESPACE1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onNAMESPACE1(stack["comments"], stack["t"])
}
func (c *current) onNAMESPACEToken1() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonNAMESPACEToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onNAMESPACEToken1()
}
func (c *current) onCPPTYPE1(comments, t any) (any, error) {
kw := NewKeyword(comments.([]*Comment), t.(*KeywordLiteral), NewLocationFromCurrent(c))
return &CppTypeKeyword{Keyword: kw}, nil
}
func (p *parser) callonCPPTYPE1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onCPPTYPE1(stack["comments"], stack["t"])
}
func (c *current) onCPPTYPEToken1() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonCPPTYPEToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onCPPTYPEToken1()
}
func (c *current) onLBRK1(comments, t any) (any, error) {
kw := NewKeyword(comments.([]*Comment), t.(*KeywordLiteral), NewLocationFromCurrent(c))
return &LBrkKeyword{Keyword: kw}, nil
}
func (p *parser) callonLBRK1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onLBRK1(stack["comments"], stack["t"])
}
func (c *current) onLBRKToken1() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonLBRKToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onLBRKToken1()
}
func (c *current) onRBRK1(comments, t any) (any, error) {
kw := NewKeyword(comments.([]*Comment), t.(*KeywordLiteral), NewLocationFromCurrent(c))
return &RBrkKeyword{Keyword: kw}, nil
}
func (p *parser) callonRBRK1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onRBRK1(stack["comments"], stack["t"])
}
func (c *current) onRBRKToken1() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonRBRKToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onRBRKToken1()
}
func (c *current) onLCUR1(comments, t any) (any, error) {
kw := NewKeyword(comments.([]*Comment), t.(*KeywordLiteral), NewLocationFromCurrent(c))
return &LCurKeyword{Keyword: kw}, nil
}
func (p *parser) callonLCUR1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onLCUR1(stack["comments"], stack["t"])
}
func (c *current) onRCUR1(comments, t any) (any, error) {
kw := NewKeyword(comments.([]*Comment), t.(*KeywordLiteral), NewLocationFromCurrent(c))
return &RCurKeyword{Keyword: kw}, nil
}
func (p *parser) callonRCUR1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onRCUR1(stack["comments"], stack["t"])
}
func (c *current) onLCURToken1() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonLCURToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onLCURToken1()
}
func (c *current) onRCURToken2() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonRCURToken2() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onRCURToken2()
}
func (c *current) onEQUAL1(comments, t any) (any, error) {
kw := NewKeyword(comments.([]*Comment), t.(*KeywordLiteral), NewLocationFromCurrent(c))
return &EqualKeyword{Keyword: kw}, nil
}
func (p *parser) callonEQUAL1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onEQUAL1(stack["comments"], stack["t"])
}
func (c *current) onEQUALToken1() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonEQUALToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onEQUALToken1()
}
func (c *current) onLPOINT1(comments, t any) (any, error) {
kw := NewKeyword(comments.([]*Comment), t.(*KeywordLiteral), NewLocationFromCurrent(c))
return &LPointKeyword{Keyword: kw}, nil
}
func (p *parser) callonLPOINT1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onLPOINT1(stack["comments"], stack["t"])
}
func (c *current) onLPOINTToken1() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonLPOINTToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onLPOINTToken1()
}
func (c *current) onRPOINT1(comments, t any) (any, error) {
kw := NewKeyword(comments.([]*Comment), t.(*KeywordLiteral), NewLocationFromCurrent(c))
return &RPointKeyword{Keyword: kw}, nil
}
func (p *parser) callonRPOINT1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onRPOINT1(stack["comments"], stack["t"])
}
func (c *current) onRPOINTToken1() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonRPOINTToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onRPOINTToken1()
}
func (c *current) onCOMMA1(comments, t any) (any, error) {
kw := NewKeyword(comments.([]*Comment), t.(*KeywordLiteral), NewLocationFromCurrent(c))
return &CommaKeyword{Keyword: kw}, nil
}
func (p *parser) callonCOMMA1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onCOMMA1(stack["comments"], stack["t"])
}
func (c *current) onCOMMAToken1() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonCOMMAToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onCOMMAToken1()
}
func (c *current) onLPAR1(comments, t any) (any, error) {
kw := NewKeyword(comments.([]*Comment), t.(*KeywordLiteral), NewLocationFromCurrent(c))
return &LParKeyword{Keyword: kw}, nil
}
func (p *parser) callonLPAR1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onLPAR1(stack["comments"], stack["t"])
}
func (c *current) onLPARToken1() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonLPARToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onLPARToken1()
}
func (c *current) onRPAR1(comments, t any) (any, error) {
kw := NewKeyword(comments.([]*Comment), t.(*KeywordLiteral), NewLocationFromCurrent(c))
return &RParKeyword{Keyword: kw}, nil
}
func (p *parser) callonRPAR1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onRPAR1(stack["comments"], stack["t"])
}
func (c *current) onRPARToken1() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonRPARToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onRPARToken1()
}
func (c *current) onCOLON1(comments, t any) (any, error) {
kw := NewKeyword(comments.([]*Comment), t.(*KeywordLiteral), NewLocationFromCurrent(c))
return &ColonKeyword{Keyword: kw}, nil
}
func (p *parser) callonCOLON1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onCOLON1(stack["comments"], stack["t"])
}
func (c *current) onCOLONToken1() (any, error) {
return NewKeywordLiteral(c), nil
}
func (p *parser) callonCOLONToken1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onCOLONToken1()
}
func (c *current) onErrFieldIndex3() error {
return InvalidFieldIndexError
}
func (p *parser) callonErrFieldIndex3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrFieldIndex3()
}
func (c *current) onErrFieldIndex1() (any, error) {
// consume until the colon or the end of this line
return NewBadFieldIndex(NewLocationFromCurrent(c)), nil
}
func (p *parser) callonErrFieldIndex1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrFieldIndex1()
}
func (c *current) onErrStructField3() error {
return InvalidStructFieldError
}
func (p *parser) callonErrStructField3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrStructField3()
}
func (c *current) onErrStructField1() (any, error) {
return NewBadField(NewLocationFromCurrent(c)), nil
}
func (p *parser) callonErrStructField1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrStructField1()
}
func (c *current) onErrStructIdentifier3() error {
return InvalidStructIdentifierError
}
func (p *parser) callonErrStructIdentifier3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrStructIdentifier3()
}
func (c *current) onErrStructIdentifier1() (any, error) {
// invalid struct identifier: consume the bad characters until '{' appears
t := NewBadIdentifier(NewLocationFromCurrent(c))
return t, nil
}
func (p *parser) callonErrStructIdentifier1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrStructIdentifier1()
}
func (c *current) onErrStructRCUR3() error {
return InvalidStructBlockRCURError
}
func (p *parser) callonErrStructRCUR3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrStructRCUR3()
}
func (c *current) onErrStructRCUR1() (any, error) {
// missing '}': consume the bad characters until the next definition. TODO(jpf): typedef and other definitions will be added later and need to be handled here as well
return NewBadKeywordLiteral(c), nil
}
func (p *parser) callonErrStructRCUR1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrStructRCUR1()
}
func (c *current) onErrStructLCUR3() error {
return InvalidStructBlockLCURError
}
func (p *parser) callonErrStructLCUR3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrStructLCUR3()
}
func (c *current) onErrStructLCUR1() (any, error) {
// missing '{': consume the bad characters until the next field or '}'
return NewBadKeywordLiteral(c), nil
}
func (p *parser) callonErrStructLCUR1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrStructLCUR1()
}
func (c *current) onErrUnionIdentifier3() error {
return InvalidUnionIdentifierError
}
func (p *parser) callonErrUnionIdentifier3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrUnionIdentifier3()
}
func (c *current) onErrUnionIdentifier1() (any, error) {
// invalid identifier: consume the bad characters until '{' appears
t := NewBadIdentifier(NewLocationFromCurrent(c))
return t, nil
}
func (p *parser) callonErrUnionIdentifier1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrUnionIdentifier1()
}
func (c *current) onErrUnionRCUR3() error {
return InvalidUnionBlockRCURError
}
func (p *parser) callonErrUnionRCUR3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrUnionRCUR3()
}
func (c *current) onErrUnionRCUR1() (any, error) {
return NewBadKeywordLiteral(c), nil
}
func (p *parser) callonErrUnionRCUR1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrUnionRCUR1()
}
func (c *current) onErrUnionLCUR3() error {
return InvalidUnionBlockLCURError
}
func (p *parser) callonErrUnionLCUR3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrUnionLCUR3()
}
func (c *current) onErrUnionLCUR1() (any, error) {
// missing '{': consume the bad characters until the next field or '}'
return "?", nil
}
func (p *parser) callonErrUnionLCUR1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrUnionLCUR1()
}
func (c *current) onErrUnionField3() error {
return InvalidUnionFieldError
}
func (p *parser) callonErrUnionField3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrUnionField3()
}
func (c *current) onErrUnionField1() (any, error) {
return NewBadField(NewLocationFromCurrent(c)), nil
}
func (p *parser) callonErrUnionField1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrUnionField1()
}
func (c *current) onErrExceptionIdentifier3() error {
return InvalidExceptionIdentifierError
}
func (p *parser) callonErrExceptionIdentifier3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrExceptionIdentifier3()
}
func (c *current) onErrExceptionIdentifier1() (any, error) {
// invalid identifier: consume the bad characters until '{' appears
t := NewBadIdentifier(NewLocationFromCurrent(c))
return t, nil
}
func (p *parser) callonErrExceptionIdentifier1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrExceptionIdentifier1()
}
func (c *current) onErrExceptionRCUR3() error {
return InvalidExceptionBlockRCURError
}
func (p *parser) callonErrExceptionRCUR3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrExceptionRCUR3()
}
func (c *current) onErrExceptionRCUR1() (any, error) {
return NewBadKeywordLiteral(c), nil
}
func (p *parser) callonErrExceptionRCUR1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrExceptionRCUR1()
}
func (c *current) onErrExceptionLCUR3() error {
return InvalidExceptionBlockLCURError
}
func (p *parser) callonErrExceptionLCUR3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrExceptionLCUR3()
}
func (c *current) onErrExceptionLCUR1() (any, error) {
// missing '{': consume the bad characters until the next field or '}'
return NewBadKeywordLiteral(c), nil
}
func (p *parser) callonErrExceptionLCUR1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrExceptionLCUR1()
}
func (c *current) onErrExceptionField3() error {
return InvalidExceptionFieldError
}
func (p *parser) callonErrExceptionField3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrExceptionField3()
}
func (c *current) onErrExceptionField1() (any, error) {
return NewBadField(NewLocationFromCurrent(c)), nil
}
func (p *parser) callonErrExceptionField1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrExceptionField1()
}
func (c *current) onErrEnumIdentifier3() error {
return InvalidEnumIdentifierError
}
func (p *parser) callonErrEnumIdentifier3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrEnumIdentifier3()
}
func (c *current) onErrEnumIdentifier1() (any, error) {
// invalid enum identifier: consume the bad characters until '{' appears
t := NewBadIdentifier(NewLocationFromCurrent(c))
return t, nil
}
func (p *parser) callonErrEnumIdentifier1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrEnumIdentifier1()
}
func (c *current) onErrEnumRCUR3() error {
return InvalidEnumBlockRCURError
}
func (p *parser) callonErrEnumRCUR3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrEnumRCUR3()
}
func (c *current) onErrEnumRCUR1() (any, error) {
return NewBadKeywordLiteral(c), nil
}
func (p *parser) callonErrEnumRCUR1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrEnumRCUR1()
}
func (c *current) onErrEnumLCUR3() error {
return InvalidEnumBlockLCURError
}
func (p *parser) callonErrEnumLCUR3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrEnumLCUR3()
}
func (c *current) onErrEnumLCUR1() (any, error) {
// missing '{': consume the bad characters until the next field or '}'
return NewBadKeywordLiteral(c), nil
}
func (p *parser) callonErrEnumLCUR1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrEnumLCUR1()
}
func (c *current) onErrEnumValue3() error {
return InvalidEnumValueError
}
func (p *parser) callonErrEnumValue3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrEnumValue3()
}
func (c *current) onErrEnumValue1() (any, error) {
return NewBadEnumValue(NewLocationFromCurrent(c)), nil
}
func (p *parser) callonErrEnumValue1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrEnumValue1()
}
func (c *current) onErrEnumValueIntConstant3() error {
return InvalidEnumValueIntConstantError
}
func (p *parser) callonErrEnumValueIntConstant3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrEnumValueIntConstant3()
}
func (c *current) onErrEnumValueIntConstant1() (any, error) {
return NewBadIntConstValue(NewLocationFromCurrent(c)), nil
}
func (p *parser) callonErrEnumValueIntConstant1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrEnumValueIntConstant1()
}
func (c *current) onErrTypedefIdentifier3() error {
return InvalidTypedefIdentifierError
}
func (p *parser) callonErrTypedefIdentifier3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrTypedefIdentifier3()
}
func (c *current) onErrTypedefIdentifier1() (any, error) {
t := NewBadIdentifier(NewLocationFromCurrent(c))
return t, nil
}
func (p *parser) callonErrTypedefIdentifier1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrTypedefIdentifier1()
}
func (c *current) onErrConstIdentifier3() error {
return InvalidConstIdentifierError
}
func (p *parser) callonErrConstIdentifier3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrConstIdentifier3()
}
func (c *current) onErrConstIdentifier1() (any, error) {
t := NewBadIdentifier(NewLocationFromCurrent(c))
return t, nil
}
func (p *parser) callonErrConstIdentifier1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrConstIdentifier1()
}
func (c *current) onErrConstMissingValue3() error {
return InvalidConstMissingValueError
}
func (p *parser) callonErrConstMissingValue3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrConstMissingValue3()
}
func (c *current) onErrConstMissingValue1() (any, error) {
return NewBadConstValue(NewLocationFromCurrent(c)), nil
}
func (p *parser) callonErrConstMissingValue1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrConstMissingValue1()
}
func (c *current) onErrConstConstValue3() error {
return InvalidConstConstValueError
}
func (p *parser) callonErrConstConstValue3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrConstConstValue3()
}
func (c *current) onErrConstConstValue1() (any, error) {
return NewBadConstValue(NewLocationFromCurrent(c)), nil
}
func (p *parser) callonErrConstConstValue1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrConstConstValue1()
}
func (c *current) onErrServiceIdentifier3() error {
return InvalidServiceIdentifierError
}
func (p *parser) callonErrServiceIdentifier3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrServiceIdentifier3()
}
func (c *current) onErrServiceIdentifier1() (any, error) {
// invalid identifier: consume the bad characters until '{' appears
t := NewBadIdentifier(NewLocationFromCurrent(c))
return t, nil
}
func (p *parser) callonErrServiceIdentifier1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrServiceIdentifier1()
}
func (c *current) onErrServiceRCUR3() error {
return InvalidServiceBlockRCURError
}
func (p *parser) callonErrServiceRCUR3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrServiceRCUR3()
}
func (c *current) onErrServiceRCUR1() (any, error) {
return NewBadKeywordLiteral(c), nil
}
func (p *parser) callonErrServiceRCUR1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrServiceRCUR1()
}
func (c *current) onErrServiceFunction3() error {
return InvalidServiceFunctionError
}
func (p *parser) callonErrServiceFunction3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrServiceFunction3()
}
func (c *current) onErrServiceFunction1() (any, error) {
return NewBadFunction(NewLocationFromCurrent(c)), nil
}
func (p *parser) callonErrServiceFunction1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrServiceFunction1()
}
func (c *current) onErrFunctionIdentifier3() error {
return InvalidFunctionIdentifierError
}
func (p *parser) callonErrFunctionIdentifier3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrFunctionIdentifier3()
}
func (c *current) onErrFunctionIdentifier1() (any, error) {
// invalid identifier: consume the bad characters until '(' appears
t := NewBadIdentifier(NewLocationFromCurrent(c))
return t, nil
}
func (p *parser) callonErrFunctionIdentifier1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrFunctionIdentifier1()
}
func (c *current) onErrFunctionArgument3() error {
return InvalidFunctionArgumentError
}
func (p *parser) callonErrFunctionArgument3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrFunctionArgument3()
}
func (c *current) onErrFunctionArgument1() (any, error) {
return NewBadField(NewLocationFromCurrent(c)), nil
}
func (p *parser) callonErrFunctionArgument1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrFunctionArgument1()
}
func (c *current) onErrLiteral1MissingRight3() error {
return InvalidLiteral1MissingRightError
}
func (p *parser) callonErrLiteral1MissingRight3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrLiteral1MissingRight3()
}
func (c *current) onErrLiteral1MissingRight1() (any, error) {
// consume the bad characters until the end of this line
return NewBadLiteral(NewLocationFromCurrent(c)), nil
}
func (p *parser) callonErrLiteral1MissingRight1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrLiteral1MissingRight1()
}
func (c *current) onErrLiteral13() error {
return InvalidLiteral1Error
}
func (p *parser) callonErrLiteral13() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrLiteral13()
}
func (c *current) onErrLiteral11() (any, error) {
// consume the bad characters until the end of this line
return NewBadLiteral(NewLocationFromCurrent(c)), nil
}
func (p *parser) callonErrLiteral11() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrLiteral11()
}
func (c *current) onErrLiteral2MissingRight3() error {
return InvalidLiteral2MissingRightError
}
func (p *parser) callonErrLiteral2MissingRight3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrLiteral2MissingRight3()
}
func (c *current) onErrLiteral2MissingRight1() (any, error) {
// consume the bad characters until the end of this line
return NewBadLiteral(NewLocationFromCurrent(c)), nil
}
func (p *parser) callonErrLiteral2MissingRight1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrLiteral2MissingRight1()
}
func (c *current) onErrLiteral23() error {
return InvalidLiteral2Error
}
func (p *parser) callonErrLiteral23() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrLiteral23()
}
func (c *current) onErrLiteral21() (any, error) {
// consume the bad characters until the end of this line
return NewBadLiteral(NewLocationFromCurrent(c)), nil
}
func (p *parser) callonErrLiteral21() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrLiteral21()
}
func (c *current) onErrConst3() error {
return InvalidConstError
}
func (p *parser) callonErrConst3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrConst3()
}
func (c *current) onErrConst1() (any, error) {
// consume the bad characters until the end of this line
return NewBadConst(NewLocationFromCurrent(c)), nil
}
func (p *parser) callonErrConst1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrConst1()
}
func (c *current) onErrTypedef3() error {
return InvalidTypedefError
}
func (p *parser) callonErrTypedef3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrTypedef3()
}
func (c *current) onErrTypedef1() (any, error) {
// consume the bad characters until the end of this line
return NewBadTypedef(NewLocationFromCurrent(c)), nil
}
func (p *parser) callonErrTypedef1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrTypedef1()
}
func (c *current) onErrEnum3() error {
return InvalidEnumError
}
func (p *parser) callonErrEnum3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrEnum3()
}
func (c *current) onErrEnum1() (any, error) {
// consume the bad characters until the end of this line
return NewBadEnum(NewLocationFromCurrent(c)), nil
}
func (p *parser) callonErrEnum1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrEnum1()
}
func (c *current) onErrService3() error {
return InvalidServiceError
}
func (p *parser) callonErrService3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrService3()
}
func (c *current) onErrService1() (any, error) {
// consume the bad characters until the end of this line
return NewBadService(NewLocationFromCurrent(c)), nil
}
func (p *parser) callonErrService1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrService1()
}
func (c *current) onErrStruct3() error {
return InvalidStructError
}
func (p *parser) callonErrStruct3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrStruct3()
}
func (c *current) onErrStruct1() (any, error) {
// consume the bad characters until the end of this line
return NewBadStruct(NewLocationFromCurrent(c)), nil
}
func (p *parser) callonErrStruct1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrStruct1()
}
func (c *current) onErrUnion3() error {
return InvalidUnionError
}
func (p *parser) callonErrUnion3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrUnion3()
}
func (c *current) onErrUnion1() (any, error) {
// consume the bad characters until the end of this line
return NewBadUnion(NewLocationFromCurrent(c)), nil
}
func (p *parser) callonErrUnion1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrUnion1()
}
func (c *current) onErrException3() error {
return InvalidExceptionError
}
func (p *parser) callonErrException3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrException3()
}
func (c *current) onErrException1() (any, error) {
// consume the bad characters until the end of this line
return NewBadException(NewLocationFromCurrent(c)), nil
}
func (p *parser) callonErrException1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrException1()
}
func (c *current) onErrDefinition3() error {
return InvalidDefinitionError
}
func (p *parser) callonErrDefinition3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrDefinition3()
}
func (c *current) onErrDefinition1() (any, error) {
// consume the bad characters until the end of this line
return NewBadDefinition(NewLocationFromCurrent(c)), nil
}
func (p *parser) callonErrDefinition1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrDefinition1()
}
func (c *current) onErrInclude3() error {
return InvalidIncludeError
}
func (p *parser) callonErrInclude3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrInclude3()
}
func (c *current) onErrInclude1() (any, error) {
// consume the bad characters until the end of this line
return NewBadInclude(NewLocationFromCurrent(c)), nil
}
func (p *parser) callonErrInclude1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrInclude1()
}
func (c *current) onErrCppInclude3() error {
return InvalidCppIncludeError
}
func (p *parser) callonErrCppInclude3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrCppInclude3()
}
func (c *current) onErrCppInclude1() (any, error) {
// consume the bad characters until the end of this line
return NewBadCPPInclude(NewLocationFromCurrent(c)), nil
}
func (p *parser) callonErrCppInclude1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrCppInclude1()
}
func (c *current) onErrNamespace3() error {
return InvalidNamespaceError
}
func (p *parser) callonErrNamespace3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrNamespace3()
}
func (c *current) onErrNamespace1() (any, error) {
// consume invalid characters until the end of this line
return NewBadNamespace(NewLocationFromCurrent(c)), nil
}
func (p *parser) callonErrNamespace1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrNamespace1()
}
func (c *current) onErrHeader3() error {
return InvalidHeaderError
}
func (p *parser) callonErrHeader3() error {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrHeader3()
}
func (c *current) onErrHeader1() (any, error) {
// consume invalid characters until the end of this line
return NewBadHeader(NewLocationFromCurrent(c)), nil
}
func (p *parser) callonErrHeader1() (any, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onErrHeader1()
}
var (
// errNoRule is returned when the grammar to parse has no rule.
errNoRule = errors.New("grammar has no rule")
// errInvalidEntrypoint is returned when the specified entrypoint rule
// does not exist.
errInvalidEntrypoint = errors.New("invalid entrypoint")
// errInvalidEncoding is returned when the source is not properly
// utf8-encoded.
errInvalidEncoding = errors.New("invalid encoding")
// errMaxExprCnt is used to signal that the maximum number of
// expressions have been parsed.
errMaxExprCnt = errors.New("max number of expressions parsed")
)
// Option is a function that can set an option on the parser. It returns
// the previous setting as an Option.
type Option func(*parser) Option
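// Because each Option returns the previous setting as another Option, an
// option can be applied temporarily and then undone. A minimal sketch,
// assuming p is a *parser inside this package:
//
// prev := Debug(true)(p) // turn on debug output, keep the old setting
// // ... run a few parse steps ...
// prev(p) // restore the previous debug setting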
// MaxExpressions creates an Option to stop parsing after the provided
// number of expressions have been parsed, if the value is 0 then the parser will
// parse for as many steps as needed (possibly an infinite number).
//
// The default for maxExprCnt is 0.
func MaxExpressions(maxExprCnt uint64) Option {
return func(p *parser) Option {
oldMaxExprCnt := p.maxExprCnt
p.maxExprCnt = maxExprCnt
return MaxExpressions(oldMaxExprCnt)
}
}
// Entrypoint creates an Option to set the rule name to use as entrypoint.
// The rule name must have been specified in the -alternate-entrypoints
// option if the parser was generated with the -optimize-grammar flag,
// otherwise it may have been optimized out. Passing an empty string sets the
// entrypoint to the first rule in the grammar.
//
// The default is to start parsing at the first rule in the grammar.
func Entrypoint(ruleName string) Option {
return func(p *parser) Option {
oldEntrypoint := p.entrypoint
p.entrypoint = ruleName
if ruleName == "" {
p.entrypoint = g.rules[0].name
}
return Entrypoint(oldEntrypoint)
}
}
// Statistics adds a user provided Stats struct to the parser to allow
// the user to process the results after the parsing has finished.
// Also the key for the "no match" counter is set.
//
// Example usage:
//
// input := "input"
// stats := Stats{}
// _, err := Parse("input-file", []byte(input), Statistics(&stats, "no match"))
// if err != nil {
// log.Panicln(err)
// }
// b, err := json.MarshalIndent(stats.ChoiceAltCnt, "", " ")
// if err != nil {
// log.Panicln(err)
// }
// fmt.Println(string(b))
func Statistics(stats *Stats, choiceNoMatch string) Option {
return func(p *parser) Option {
oldStats := p.Stats
p.Stats = stats
oldChoiceNoMatch := p.choiceNoMatch
p.choiceNoMatch = choiceNoMatch
if p.Stats.ChoiceAltCnt == nil {
p.Stats.ChoiceAltCnt = make(map[string]map[string]int)
}
return Statistics(oldStats, oldChoiceNoMatch)
}
}
// Debug creates an Option to set the debug flag to b. When set to true,
// debugging information is printed to stdout while parsing.
//
// The default is false.
func Debug(b bool) Option {
return func(p *parser) Option {
old := p.debug
p.debug = b
return Debug(old)
}
}
// Memoize creates an Option to set the memoize flag to b. When set to true,
// the parser will cache all results so each expression is evaluated only
// once. This guarantees linear parsing time even for pathological cases,
// at the expense of more memory and slower times for typical cases.
//
// The default is false.
func Memoize(b bool) Option {
return func(p *parser) Option {
old := p.memoize
p.memoize = b
return Memoize(old)
}
}
// AllowInvalidUTF8 creates an Option to allow invalid UTF-8 bytes.
// Every invalid UTF-8 byte is treated as a utf8.RuneError (U+FFFD)
// by character class matchers and is matched by the any matcher.
// The returned matched value, c.text and c.offset are NOT affected.
//
// The default is false.
func AllowInvalidUTF8(b bool) Option {
return func(p *parser) Option {
old := p.allowInvalidUTF8
p.allowInvalidUTF8 = b
return AllowInvalidUTF8(old)
}
}
// Recover creates an Option to set the recover flag to b. When set to
// true, this causes the parser to recover from panics and convert it
// to an error. Setting it to false can be useful while debugging to
// access the full stack trace.
//
// The default is true.
func Recover(b bool) Option {
return func(p *parser) Option {
old := p.recover
p.recover = b
return Recover(old)
}
}
// GlobalStore creates an Option to set a key to a certain value in
// the globalStore.
func GlobalStore(key string, value any) Option {
return func(p *parser) Option {
old := p.cur.globalStore[key]
p.cur.globalStore[key] = value
return GlobalStore(key, old)
}
}
// InitState creates an Option to set a key to a certain value in
// the global "state" store.
func InitState(key string, value any) Option {
return func(p *parser) Option {
old := p.cur.state[key]
p.cur.state[key] = value
return InitState(key, old)
}
}
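// A minimal sketch of seeding both stores before parsing (the file name and
// keys are illustrative): values set with GlobalStore end up in
// c.globalStore and are never rolled back, while values set with InitState
// end up in c.state and are restored on backtracking.
//
// _, err := Parse("example.thrift", data,
// GlobalStore("filename", "example.thrift"),
// InitState("depth", 0))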
// ParseFile parses the file identified by filename.
func ParseFile(filename string, opts ...Option) (i any, err error) {
f, err := os.Open(filename)
if err != nil {
return nil, err
}
defer func() {
if closeErr := f.Close(); closeErr != nil {
err = closeErr
}
}()
return ParseReader(filename, f, opts...)
}
// ParseReader parses the data from r using filename as information in the
// error messages.
func ParseReader(filename string, r io.Reader, opts ...Option) (any, error) {
b, err := io.ReadAll(r)
if err != nil {
return nil, err
}
return Parse(filename, b, opts...)
}
// Parse parses the data from b using filename as information in the
// error messages.
func Parse(filename string, b []byte, opts ...Option) (any, error) {
return newParser(filename, b, opts...).parse(g)
}
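// Illustrative usage of the entry points above (the path is arbitrary):
//
// v, err := ParseFile("idl/example.thrift", Memoize(true))
// if err != nil {
// // err may be an errList carrying several positioned parse errors
// }
// _ = v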
// position records a position in the text.
type position struct {
line, col, offset int
}
func (p position) String() string {
return strconv.Itoa(p.line) + ":" + strconv.Itoa(p.col) + " [" + strconv.Itoa(p.offset) + "]"
}
// savepoint stores all state required to go back to this point in the
// parser.
type savepoint struct {
position
rn rune
w int
}
type current struct {
pos position // start position of the match
text []byte // raw text of the match
// state is a store for arbitrary key,value pairs that the user wants to be
// tied to the backtracking of the parser.
// This is always rolled back if a parsing rule fails.
state storeDict
// globalStore is a general store for the user to store arbitrary key-value
// pairs that they need to manage and that they do not want tied to the
// backtracking of the parser. This is only modified by the user and never
// rolled back by the parser. It is always up to the user to keep this in a
// consistent state.
globalStore storeDict
}
type storeDict map[string]any
// the AST types...
type grammar struct {
pos position
rules []*rule
}
type rule struct {
pos position
name string
displayName string
expr any
}
type choiceExpr struct {
pos position
alternatives []any
}
type actionExpr struct {
pos position
expr any
run func(*parser) (any, error)
}
type recoveryExpr struct {
pos position
expr any
recoverExpr any
failureLabel []string
}
type seqExpr struct {
pos position
exprs []any
}
type throwExpr struct {
pos position
label string
}
type labeledExpr struct {
pos position
label string
expr any
}
type expr struct {
pos position
expr any
}
type (
andExpr expr
notExpr expr
zeroOrOneExpr expr
zeroOrMoreExpr expr
oneOrMoreExpr expr
)
type ruleRefExpr struct {
pos position
name string
}
type stateCodeExpr struct {
pos position
run func(*parser) error
}
type andCodeExpr struct {
pos position
run func(*parser) (bool, error)
}
type notCodeExpr struct {
pos position
run func(*parser) (bool, error)
}
type litMatcher struct {
pos position
val string
ignoreCase bool
want string
}
type charClassMatcher struct {
pos position
val string
basicLatinChars [128]bool
chars []rune
ranges []rune
classes []*unicode.RangeTable
ignoreCase bool
inverted bool
}
type anyMatcher position
// errList cumulates the errors found by the parser.
type errList []error
func (e *errList) add(err error) {
*e = append(*e, err)
}
func (e errList) err() error {
if len(e) == 0 {
return nil
}
e.dedupe()
return e
}
func (e *errList) dedupe() {
var cleaned []error
set := make(map[string]bool)
for _, err := range *e {
if msg := err.Error(); !set[msg] {
set[msg] = true
cleaned = append(cleaned, err)
}
}
*e = cleaned
}
func (e errList) Error() string {
switch len(e) {
case 0:
return ""
case 1:
return e[0].Error()
default:
var buf bytes.Buffer
for i, err := range e {
if i > 0 {
buf.WriteRune('\n')
}
buf.WriteString(err.Error())
}
return buf.String()
}
}
// parserError wraps an error with a prefix indicating the rule in which
// the error occurred. The original error is stored in the Inner field.
type parserError struct {
Inner error
pos position
prefix string
expected []string
}
// Error returns the error message.
func (p *parserError) Error() string {
return p.prefix + ": " + p.Inner.Error()
}
// newParser creates a parser with the specified input source and options.
func newParser(filename string, b []byte, opts ...Option) *parser {
stats := Stats{
ChoiceAltCnt: make(map[string]map[string]int),
}
p := &parser{
filename: filename,
errs: new(errList),
data: b,
pt: savepoint{position: position{line: 1}},
recover: true,
cur: current{
state: make(storeDict),
globalStore: make(storeDict),
},
maxFailPos: position{col: 1, line: 1},
maxFailExpected: make([]string, 0, 20),
Stats: &stats,
// start rule is rule [0] unless an alternate entrypoint is specified
entrypoint: g.rules[0].name,
}
p.setOptions(opts)
if p.maxExprCnt == 0 {
p.maxExprCnt = math.MaxUint64
}
return p
}
// setOptions applies the options to the parser.
func (p *parser) setOptions(opts []Option) {
for _, opt := range opts {
opt(p)
}
}
type resultTuple struct {
v any
b bool
end savepoint
}
const choiceNoMatch = -1
// Stats stores some statistics, gathered during parsing
type Stats struct {
// ExprCnt counts the number of expressions processed during parsing
// This value is compared to the maximum number of expressions allowed
// (set by the MaxExpressions option).
ExprCnt uint64
// ChoiceAltCnt is used to count for each ordered choice expression,
// which alternative is used how many times.
// These numbers make it possible to reorder the alternatives of the ordered
// choice expression to increase the performance of the parser.
//
// The outer key of ChoiceAltCnt is composed of the name of the rule as well
// as the line and the column of the ordered choice.
// The inner key of ChoiceAltCnt is the number (one-based) of the matching alternative.
// For each alternative the number of matches is counted. If an ordered choice does not
// match, a special counter is incremented. The name of this counter is set with
// the parser option Statistics.
// For an alternative to be included in ChoiceAltCnt, it has to match at least once.
ChoiceAltCnt map[string]map[string]int
}
type parser struct {
filename string
pt savepoint
cur current
data []byte
errs *errList
depth int
recover bool
debug bool
memoize bool
// memoization table for the packrat algorithm:
// map[offset in source] map[expression or rule] {value, match}
memo map[int]map[any]resultTuple
// rules table, maps the rule identifier to the rule node
rules map[string]*rule
// variables stack, map of label to value
vstack []map[string]any
// rule stack, allows identification of the current rule in errors
rstack []*rule
// parse fail
maxFailPos position
maxFailExpected []string
maxFailInvertExpected bool
// max number of expressions to be parsed
maxExprCnt uint64
// entrypoint for the parser
entrypoint string
allowInvalidUTF8 bool
*Stats
choiceNoMatch string
// recovery expression stack, keeps track of the currently available recovery expression, these are traversed in reverse
recoveryStack []map[string]any
}
// push a variable set on the vstack.
func (p *parser) pushV() {
if cap(p.vstack) == len(p.vstack) {
// create new empty slot in the stack
p.vstack = append(p.vstack, nil)
} else {
// slice to 1 more
p.vstack = p.vstack[:len(p.vstack)+1]
}
// get the last args set
m := p.vstack[len(p.vstack)-1]
if m != nil && len(m) == 0 {
// empty map, all good
return
}
m = make(map[string]any)
p.vstack[len(p.vstack)-1] = m
}
// pop a variable set from the vstack.
func (p *parser) popV() {
// if the map is not empty, clear it
m := p.vstack[len(p.vstack)-1]
if len(m) > 0 {
// GC that map
p.vstack[len(p.vstack)-1] = nil
}
p.vstack = p.vstack[:len(p.vstack)-1]
}
// push a recovery expression with its labels to the recoveryStack
func (p *parser) pushRecovery(labels []string, expr any) {
if cap(p.recoveryStack) == len(p.recoveryStack) {
// create new empty slot in the stack
p.recoveryStack = append(p.recoveryStack, nil)
} else {
// slice to 1 more
p.recoveryStack = p.recoveryStack[:len(p.recoveryStack)+1]
}
m := make(map[string]any, len(labels))
for _, fl := range labels {
m[fl] = expr
}
p.recoveryStack[len(p.recoveryStack)-1] = m
}
// pop a recovery expression from the recoveryStack
func (p *parser) popRecovery() {
// GC that map
p.recoveryStack[len(p.recoveryStack)-1] = nil
p.recoveryStack = p.recoveryStack[:len(p.recoveryStack)-1]
}
func (p *parser) print(prefix, s string) string {
if !p.debug {
return s
}
fmt.Printf("%s %d:%d:%d: %s [%#U]\n",
prefix, p.pt.line, p.pt.col, p.pt.offset, s, p.pt.rn)
return s
}
func (p *parser) printIndent(mark string, s string) string {
return p.print(strings.Repeat(" ", p.depth)+mark, s)
}
func (p *parser) in(s string) string {
res := p.printIndent(">", s)
p.depth++
return res
}
func (p *parser) out(s string) string {
p.depth--
return p.printIndent("<", s)
}
func (p *parser) addErr(err error) {
p.addErrAt(err, p.pt.position, []string{})
}
func (p *parser) addErrAt(err error, pos position, expected []string) {
var buf bytes.Buffer
if p.filename != "" {
buf.WriteString(p.filename)
}
if buf.Len() > 0 {
buf.WriteString(":")
}
buf.WriteString(fmt.Sprintf("%d:%d (%d)", pos.line, pos.col, pos.offset))
if len(p.rstack) > 0 {
if buf.Len() > 0 {
buf.WriteString(": ")
}
rule := p.rstack[len(p.rstack)-1]
if rule.displayName != "" {
buf.WriteString("rule " + rule.displayName)
} else {
buf.WriteString("rule " + rule.name)
}
}
pe := &parserError{Inner: err, pos: pos, prefix: buf.String(), expected: expected}
p.errs.add(pe)
}
func (p *parser) failAt(fail bool, pos position, want string) {
// process fail if parsing fails and not inverted or parsing succeeds and invert is set
if fail == p.maxFailInvertExpected {
if pos.offset < p.maxFailPos.offset {
return
}
if pos.offset > p.maxFailPos.offset {
p.maxFailPos = pos
p.maxFailExpected = p.maxFailExpected[:0]
}
if p.maxFailInvertExpected {
want = "!" + want
}
p.maxFailExpected = append(p.maxFailExpected, want)
}
}
// read advances the parser to the next rune.
func (p *parser) read() {
p.pt.offset += p.pt.w
rn, n := utf8.DecodeRune(p.data[p.pt.offset:])
p.pt.rn = rn
p.pt.w = n
p.pt.col++
if rn == '\n' {
p.pt.line++
p.pt.col = 0
}
if rn == utf8.RuneError && n == 1 { // see utf8.DecodeRune
if !p.allowInvalidUTF8 {
p.addErr(errInvalidEncoding)
}
}
}
// restore parser position to the savepoint pt.
func (p *parser) restore(pt savepoint) {
if p.debug {
defer p.out(p.in("restore"))
}
if pt.offset == p.pt.offset {
return
}
p.pt = pt
}
// Cloner is implemented by any value that has a Clone method, which returns a
// copy of the value. This is mainly used for types which are not passed by
// value (e.g map, slice, chan) or structs that contain such types.
//
// This is used in conjunction with the global state feature to create proper
// copies of the state to allow the parser to properly restore the state in
// the case of backtracking.
type Cloner interface {
Clone() any
}
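// A minimal sketch of a state value implementing Cloner; the type is a
// hypothetical example, not part of this package. Without Clone, a map
// placed in the state would be shared between the original and the cloned
// state, so backtracking could not restore it.
//
// type stringSet map[string]struct{}
//
// func (s stringSet) Clone() any {
// out := make(stringSet, len(s))
// for k := range s {
// out[k] = struct{}{}
// }
// return out
// }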
var statePool = &sync.Pool{
New: func() any { return make(storeDict) },
}
func (sd storeDict) Discard() {
for k := range sd {
delete(sd, k)
}
statePool.Put(sd)
}
// clone and return parser current state.
func (p *parser) cloneState() storeDict {
if p.debug {
defer p.out(p.in("cloneState"))
}
state := statePool.Get().(storeDict)
for k, v := range p.cur.state {
if c, ok := v.(Cloner); ok {
state[k] = c.Clone()
} else {
state[k] = v
}
}
return state
}
// restore parser current state to the state storeDict.
// restoreState should be applied only once for each cloned state
func (p *parser) restoreState(state storeDict) {
if p.debug {
defer p.out(p.in("restoreState"))
}
p.cur.state.Discard()
p.cur.state = state
}
// get the slice of bytes from the savepoint start to the current position.
func (p *parser) sliceFrom(start savepoint) []byte {
return p.data[start.position.offset:p.pt.position.offset]
}
func (p *parser) getMemoized(node any) (resultTuple, bool) {
if len(p.memo) == 0 {
return resultTuple{}, false
}
m := p.memo[p.pt.offset]
if len(m) == 0 {
return resultTuple{}, false
}
res, ok := m[node]
return res, ok
}
func (p *parser) setMemoized(pt savepoint, node any, tuple resultTuple) {
if p.memo == nil {
p.memo = make(map[int]map[any]resultTuple)
}
m := p.memo[pt.offset]
if m == nil {
m = make(map[any]resultTuple)
p.memo[pt.offset] = m
}
m[node] = tuple
}
func (p *parser) buildRulesTable(g *grammar) {
p.rules = make(map[string]*rule, len(g.rules))
for _, r := range g.rules {
p.rules[r.name] = r
}
}
func (p *parser) parse(g *grammar) (val any, err error) {
if len(g.rules) == 0 {
p.addErr(errNoRule)
return nil, p.errs.err()
}
// TODO : not super critical but this could be generated
p.buildRulesTable(g)
if p.recover {
// panic can be used in action code to stop parsing immediately
// and return the panic as an error.
defer func() {
if e := recover(); e != nil {
if p.debug {
defer p.out(p.in("panic handler"))
}
val = nil
switch e := e.(type) {
case error:
p.addErr(e)
default:
p.addErr(fmt.Errorf("%v", e))
}
err = p.errs.err()
}
}()
}
startRule, ok := p.rules[p.entrypoint]
if !ok {
p.addErr(errInvalidEntrypoint)
return nil, p.errs.err()
}
p.read() // advance to first rune
val, ok = p.parseRuleWrap(startRule)
if !ok {
if len(*p.errs) == 0 {
// If parsing fails, but no errors have been recorded, the expected values
// for the farthest parser position are returned as error.
maxFailExpectedMap := make(map[string]struct{}, len(p.maxFailExpected))
for _, v := range p.maxFailExpected {
maxFailExpectedMap[v] = struct{}{}
}
expected := make([]string, 0, len(maxFailExpectedMap))
eof := false
if _, ok := maxFailExpectedMap["!."]; ok {
delete(maxFailExpectedMap, "!.")
eof = true
}
for k := range maxFailExpectedMap {
expected = append(expected, k)
}
sort.Strings(expected)
if eof {
expected = append(expected, "EOF")
}
p.addErrAt(errors.New("no match found, expected: "+listJoin(expected, ", ", "or")), p.maxFailPos, expected)
}
return nil, p.errs.err()
}
return val, p.errs.err()
}
func listJoin(list []string, sep string, lastSep string) string {
switch len(list) {
case 0:
return ""
case 1:
return list[0]
default:
return strings.Join(list[:len(list)-1], sep) + " " + lastSep + " " + list[len(list)-1]
}
}
func (p *parser) parseRuleMemoize(rule *rule) (any, bool) {
res, ok := p.getMemoized(rule)
if ok {
p.restore(res.end)
return res.v, res.b
}
startMark := p.pt
val, ok := p.parseRule(rule)
p.setMemoized(startMark, rule, resultTuple{val, ok, p.pt})
return val, ok
}
func (p *parser) parseRuleWrap(rule *rule) (any, bool) {
if p.debug {
defer p.out(p.in("parseRule " + rule.name))
}
var (
val any
ok bool
startMark = p.pt
)
if p.memoize {
val, ok = p.parseRuleMemoize(rule)
} else {
val, ok = p.parseRule(rule)
}
if ok && p.debug {
p.printIndent("MATCH", string(p.sliceFrom(startMark)))
}
return val, ok
}
func (p *parser) parseRule(rule *rule) (any, bool) {
p.rstack = append(p.rstack, rule)
p.pushV()
val, ok := p.parseExprWrap(rule.expr)
p.popV()
p.rstack = p.rstack[:len(p.rstack)-1]
return val, ok
}
func (p *parser) parseExprWrap(expr any) (any, bool) {
var pt savepoint
if p.memoize {
res, ok := p.getMemoized(expr)
if ok {
p.restore(res.end)
return res.v, res.b
}
pt = p.pt
}
val, ok := p.parseExpr(expr)
if p.memoize {
p.setMemoized(pt, expr, resultTuple{val, ok, p.pt})
}
return val, ok
}
func (p *parser) parseExpr(expr any) (any, bool) {
p.ExprCnt++
if p.ExprCnt > p.maxExprCnt {
panic(errMaxExprCnt)
}
var val any
var ok bool
switch expr := expr.(type) {
case *actionExpr:
val, ok = p.parseActionExpr(expr)
case *andCodeExpr:
val, ok = p.parseAndCodeExpr(expr)
case *andExpr:
val, ok = p.parseAndExpr(expr)
case *anyMatcher:
val, ok = p.parseAnyMatcher(expr)
case *charClassMatcher:
val, ok = p.parseCharClassMatcher(expr)
case *choiceExpr:
val, ok = p.parseChoiceExpr(expr)
case *labeledExpr:
val, ok = p.parseLabeledExpr(expr)
case *litMatcher:
val, ok = p.parseLitMatcher(expr)
case *notCodeExpr:
val, ok = p.parseNotCodeExpr(expr)
case *notExpr:
val, ok = p.parseNotExpr(expr)
case *oneOrMoreExpr:
val, ok = p.parseOneOrMoreExpr(expr)
case *recoveryExpr:
val, ok = p.parseRecoveryExpr(expr)
case *ruleRefExpr:
val, ok = p.parseRuleRefExpr(expr)
case *seqExpr:
val, ok = p.parseSeqExpr(expr)
case *stateCodeExpr:
val, ok = p.parseStateCodeExpr(expr)
case *throwExpr:
val, ok = p.parseThrowExpr(expr)
case *zeroOrMoreExpr:
val, ok = p.parseZeroOrMoreExpr(expr)
case *zeroOrOneExpr:
val, ok = p.parseZeroOrOneExpr(expr)
default:
panic(fmt.Sprintf("unknown expression type %T", expr))
}
return val, ok
}
func (p *parser) parseActionExpr(act *actionExpr) (any, bool) {
if p.debug {
defer p.out(p.in("parseActionExpr"))
}
start := p.pt
val, ok := p.parseExprWrap(act.expr)
if ok {
p.cur.pos = start.position
p.cur.text = p.sliceFrom(start)
state := p.cloneState()
actVal, err := act.run(p)
if err != nil {
p.addErrAt(err, start.position, []string{})
}
p.restoreState(state)
val = actVal
}
if ok && p.debug {
p.printIndent("MATCH", string(p.sliceFrom(start)))
}
return val, ok
}
func (p *parser) parseAndCodeExpr(and *andCodeExpr) (any, bool) {
if p.debug {
defer p.out(p.in("parseAndCodeExpr"))
}
state := p.cloneState()
ok, err := and.run(p)
if err != nil {
p.addErr(err)
}
p.restoreState(state)
return nil, ok
}
func (p *parser) parseAndExpr(and *andExpr) (any, bool) {
if p.debug {
defer p.out(p.in("parseAndExpr"))
}
pt := p.pt
state := p.cloneState()
p.pushV()
_, ok := p.parseExprWrap(and.expr)
p.popV()
p.restoreState(state)
p.restore(pt)
return nil, ok
}
func (p *parser) parseAnyMatcher(any *anyMatcher) (any, bool) {
if p.debug {
defer p.out(p.in("parseAnyMatcher"))
}
if p.pt.rn == utf8.RuneError && p.pt.w == 0 {
// EOF - see utf8.DecodeRune
p.failAt(false, p.pt.position, ".")
return nil, false
}
start := p.pt
p.read()
p.failAt(true, start.position, ".")
return p.sliceFrom(start), true
}
func (p *parser) parseCharClassMatcher(chr *charClassMatcher) (any, bool) {
if p.debug {
defer p.out(p.in("parseCharClassMatcher"))
}
cur := p.pt.rn
start := p.pt
// can't match EOF
if cur == utf8.RuneError && p.pt.w == 0 { // see utf8.DecodeRune
p.failAt(false, start.position, chr.val)
return nil, false
}
if chr.ignoreCase {
cur = unicode.ToLower(cur)
}
// try to match in the list of available chars
for _, rn := range chr.chars {
if rn == cur {
if chr.inverted {
p.failAt(false, start.position, chr.val)
return nil, false
}
p.read()
p.failAt(true, start.position, chr.val)
return p.sliceFrom(start), true
}
}
// try to match in the list of ranges
for i := 0; i < len(chr.ranges); i += 2 {
if cur >= chr.ranges[i] && cur <= chr.ranges[i+1] {
if chr.inverted {
p.failAt(false, start.position, chr.val)
return nil, false
}
p.read()
p.failAt(true, start.position, chr.val)
return p.sliceFrom(start), true
}
}
// try to match in the list of Unicode classes
for _, cl := range chr.classes {
if unicode.Is(cl, cur) {
if chr.inverted {
p.failAt(false, start.position, chr.val)
return nil, false
}
p.read()
p.failAt(true, start.position, chr.val)
return p.sliceFrom(start), true
}
}
if chr.inverted {
p.read()
p.failAt(true, start.position, chr.val)
return p.sliceFrom(start), true
}
p.failAt(false, start.position, chr.val)
return nil, false
}
func (p *parser) incChoiceAltCnt(ch *choiceExpr, altI int) {
choiceIdent := fmt.Sprintf("%s %d:%d", p.rstack[len(p.rstack)-1].name, ch.pos.line, ch.pos.col)
m := p.ChoiceAltCnt[choiceIdent]
if m == nil {
m = make(map[string]int)
p.ChoiceAltCnt[choiceIdent] = m
}
// We increment altI by 1, so the keys do not start at 0
alt := strconv.Itoa(altI + 1)
if altI == choiceNoMatch {
alt = p.choiceNoMatch
}
m[alt]++
}
func (p *parser) parseChoiceExpr(ch *choiceExpr) (any, bool) {
if p.debug {
defer p.out(p.in("parseChoiceExpr"))
}
for altI, alt := range ch.alternatives {
// dummy assignment to prevent compile error if optimized
_ = altI
state := p.cloneState()
p.pushV()
val, ok := p.parseExprWrap(alt)
p.popV()
if ok {
p.incChoiceAltCnt(ch, altI)
return val, ok
}
p.restoreState(state)
}
p.incChoiceAltCnt(ch, choiceNoMatch)
return nil, false
}
func (p *parser) parseLabeledExpr(lab *labeledExpr) (any, bool) {
if p.debug {
defer p.out(p.in("parseLabeledExpr"))
}
p.pushV()
val, ok := p.parseExprWrap(lab.expr)
p.popV()
if ok && lab.label != "" {
m := p.vstack[len(p.vstack)-1]
m[lab.label] = val
}
return val, ok
}
func (p *parser) parseLitMatcher(lit *litMatcher) (any, bool) {
if p.debug {
defer p.out(p.in("parseLitMatcher"))
}
start := p.pt
for _, want := range lit.val {
cur := p.pt.rn
if lit.ignoreCase {
cur = unicode.ToLower(cur)
}
if cur != want {
p.failAt(false, start.position, lit.want)
p.restore(start)
return nil, false
}
p.read()
}
p.failAt(true, start.position, lit.want)
return p.sliceFrom(start), true
}
func (p *parser) parseNotCodeExpr(not *notCodeExpr) (any, bool) {
if p.debug {
defer p.out(p.in("parseNotCodeExpr"))
}
state := p.cloneState()
ok, err := not.run(p)
if err != nil {
p.addErr(err)
}
p.restoreState(state)
return nil, !ok
}
func (p *parser) parseNotExpr(not *notExpr) (any, bool) {
if p.debug {
defer p.out(p.in("parseNotExpr"))
}
pt := p.pt
state := p.cloneState()
p.pushV()
p.maxFailInvertExpected = !p.maxFailInvertExpected
_, ok := p.parseExprWrap(not.expr)
p.maxFailInvertExpected = !p.maxFailInvertExpected
p.popV()
p.restoreState(state)
p.restore(pt)
return nil, !ok
}
func (p *parser) parseOneOrMoreExpr(expr *oneOrMoreExpr) (any, bool) {
if p.debug {
defer p.out(p.in("parseOneOrMoreExpr"))
}
var vals []any
for {
p.pushV()
val, ok := p.parseExprWrap(expr.expr)
p.popV()
if !ok {
if len(vals) == 0 {
// did not match once, no match
return nil, false
}
return vals, true
}
vals = append(vals, val)
}
}
func (p *parser) parseRecoveryExpr(recover *recoveryExpr) (any, bool) {
if p.debug {
defer p.out(p.in("parseRecoveryExpr (" + strings.Join(recover.failureLabel, ",") + ")"))
}
p.pushRecovery(recover.failureLabel, recover.recoverExpr)
val, ok := p.parseExprWrap(recover.expr)
p.popRecovery()
return val, ok
}
func (p *parser) parseRuleRefExpr(ref *ruleRefExpr) (any, bool) {
if p.debug {
defer p.out(p.in("parseRuleRefExpr " + ref.name))
}
if ref.name == "" {
panic(fmt.Sprintf("%s: invalid rule: missing name", ref.pos))
}
rule := p.rules[ref.name]
if rule == nil {
p.addErr(fmt.Errorf("undefined rule: %s", ref.name))
return nil, false
}
return p.parseRuleWrap(rule)
}
func (p *parser) parseSeqExpr(seq *seqExpr) (any, bool) {
if p.debug {
defer p.out(p.in("parseSeqExpr"))
}
vals := make([]any, 0, len(seq.exprs))
pt := p.pt
state := p.cloneState()
for _, expr := range seq.exprs {
val, ok := p.parseExprWrap(expr)
if !ok {
p.restoreState(state)
p.restore(pt)
return nil, false
}
vals = append(vals, val)
}
return vals, true
}
func (p *parser) parseStateCodeExpr(state *stateCodeExpr) (any, bool) {
if p.debug {
defer p.out(p.in("parseStateCodeExpr"))
}
err := state.run(p)
if err != nil {
p.addErr(err)
}
return nil, true
}
func (p *parser) parseThrowExpr(expr *throwExpr) (any, bool) {
if p.debug {
defer p.out(p.in("parseThrowExpr"))
}
for i := len(p.recoveryStack) - 1; i >= 0; i-- {
if recoverExpr, ok := p.recoveryStack[i][expr.label]; ok {
if val, ok := p.parseExprWrap(recoverExpr); ok {
return val, ok
}
}
}
return nil, false
}
func (p *parser) parseZeroOrMoreExpr(expr *zeroOrMoreExpr) (any, bool) {
if p.debug {
defer p.out(p.in("parseZeroOrMoreExpr"))
}
var vals []any
for {
p.pushV()
val, ok := p.parseExprWrap(expr.expr)
p.popV()
if !ok {
return vals, true
}
vals = append(vals, val)
}
}
func (p *parser) parseZeroOrOneExpr(expr *zeroOrOneExpr) (any, bool) {
if p.debug {
defer p.out(p.in("parseZeroOrOneExpr"))
}
p.pushV()
val, _ := p.parseExprWrap(expr.expr)
p.popV()
// whether it matched or not, consider it a match
return val, true
}
package parser
func StringPointer(s string) *string {
return &s
}
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package diff
import (
"bytes"
"fmt"
"sort"
"strings"
)
// A pair is a pair of values tracked for both the x and y side of a diff.
// It is typically a pair of line indexes.
type pair struct{ x, y int }
// Diff returns an anchored diff of the two texts old and new
// in the “unified diff” format. If old and new are identical,
// Diff returns a nil slice (no output).
//
// Unix diff implementations typically look for a diff with
// the smallest number of lines inserted and removed,
// which can in the worst case take time quadratic in the
// number of lines in the texts. As a result, many implementations
// either can be made to run for a long time or cut off the search
// after a predetermined amount of work.
//
// In contrast, this implementation looks for a diff with the
// smallest number of “unique” lines inserted and removed,
// where unique means a line that appears just once in both old and new.
// We call this an “anchored diff” because the unique lines anchor
// the chosen matching regions. An anchored diff is usually clearer
// than a standard diff, because the algorithm does not try to
// reuse unrelated blank lines or closing braces.
// The algorithm also guarantees to run in O(n log n) time
// instead of the standard O(n²) time.
//
// Some systems call this approach a “patience diff,” named for
// the “patience sorting” algorithm, itself named for a solitaire card game.
// We avoid that name for two reasons. First, the name has been used
// for a few different variants of the algorithm, so it is imprecise.
// Second, the name is frequently interpreted as meaning that you have
// to wait longer (to be patient) for the diff, meaning that it is a slower algorithm,
// when in fact the algorithm is faster than the standard one.
func Diff(oldName string, old []byte, newName string, new []byte) []byte {
if bytes.Equal(old, new) {
return nil
}
x := lines(old)
y := lines(new)
// Print diff header.
var out bytes.Buffer
fmt.Fprintf(&out, "diff %s %s\n", oldName, newName)
fmt.Fprintf(&out, "--- %s\n", oldName)
fmt.Fprintf(&out, "+++ %s\n", newName)
// Loop over matches to consider,
// expanding each match to include surrounding lines,
// and then printing diff chunks.
// To avoid setup/teardown cases outside the loop,
// tgs returns a leading {0,0} and trailing {len(x), len(y)} pair
// in the sequence of matches.
var (
done pair // printed up to x[:done.x] and y[:done.y]
chunk pair // start lines of current chunk
count pair // number of lines from each side in current chunk
ctext []string // lines for current chunk
)
for _, m := range tgs(x, y) {
if m.x < done.x {
// Already handled scanning forward from earlier match.
continue
}
// Expand matching lines as far as possible,
// establishing that x[start.x:end.x] == y[start.y:end.y].
// Note that on the first (or last) iteration we may (or definitely do)
// have an empty match: start.x==end.x and start.y==end.y.
start := m
for start.x > done.x && start.y > done.y && x[start.x-1] == y[start.y-1] {
start.x--
start.y--
}
end := m
for end.x < len(x) && end.y < len(y) && x[end.x] == y[end.y] {
end.x++
end.y++
}
// Emit the mismatched lines before start into this chunk.
// (No effect on first sentinel iteration, when start = {0,0}.)
for _, s := range x[done.x:start.x] {
ctext = append(ctext, "-"+s)
count.x++
}
for _, s := range y[done.y:start.y] {
ctext = append(ctext, "+"+s)
count.y++
}
// If we're not at EOF and have too few common lines,
// the chunk includes all the common lines and continues.
const C = 3 // number of context lines
if (end.x < len(x) || end.y < len(y)) &&
(end.x-start.x < C || (len(ctext) > 0 && end.x-start.x < 2*C)) {
for _, s := range x[start.x:end.x] {
ctext = append(ctext, " "+s)
count.x++
count.y++
}
done = end
continue
}
// End chunk with common lines for context.
if len(ctext) > 0 {
n := end.x - start.x
if n > C {
n = C
}
for _, s := range x[start.x : start.x+n] {
ctext = append(ctext, " "+s)
count.x++
count.y++
}
done = pair{start.x + n, start.y + n}
// Format and emit chunk.
// Convert line numbers to 1-indexed.
// Special case: empty file shows up as 0,0 not 1,0.
if count.x > 0 {
chunk.x++
}
if count.y > 0 {
chunk.y++
}
fmt.Fprintf(&out, "@@ -%d,%d +%d,%d @@\n", chunk.x, count.x, chunk.y, count.y)
for _, s := range ctext {
out.WriteString(s)
}
count.x = 0
count.y = 0
ctext = ctext[:0]
}
// If we reached EOF, we're done.
if end.x >= len(x) && end.y >= len(y) {
break
}
// Otherwise start a new chunk.
chunk = pair{end.x - C, end.y - C}
for _, s := range x[chunk.x:end.x] {
ctext = append(ctext, " "+s)
count.x++
count.y++
}
done = end
}
return out.Bytes()
}
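// Illustrative usage (the names are arbitrary); Diff returns nil when the
// two inputs are byte-for-byte identical:
//
// patch := Diff("old.thrift", oldSrc, "new.thrift", newSrc)
// if patch != nil {
// os.Stdout.Write(patch)
// }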
// lines returns the lines in the file x, including newlines.
// If the file does not end in a newline, one is supplied
// along with a warning about the missing newline.
func lines(x []byte) []string {
l := strings.SplitAfter(string(x), "\n")
if l[len(l)-1] == "" {
l = l[:len(l)-1]
} else {
// Treat last line as having a message about the missing newline attached,
// using the same text as BSD/GNU diff (including the leading backslash).
l[len(l)-1] += "\n\\ No newline at end of file\n"
}
return l
}
// tgs returns the pairs of indexes of the longest common subsequence
// of unique lines in x and y, where a unique line is one that appears
// once in x and once in y.
//
// The longest common subsequence algorithm is as described in
// Thomas G. Szymanski, “A Special Case of the Maximal Common
// Subsequence Problem,” Princeton TR #170 (January 1975),
// available at https://research.swtch.com/tgs170.pdf.
func tgs(x, y []string) []pair {
// Count the number of times each string appears in x and y.
// We only care about 0, 1, many, counted as 0, -1, -2
// for the x side and 0, -4, -8 for the y side.
// Using negative numbers now lets us distinguish positive line numbers later.
m := make(map[string]int)
for _, s := range x {
if c := m[s]; c > -2 {
m[s] = c - 1
}
}
for _, s := range y {
if c := m[s]; c > -8 {
m[s] = c - 4
}
}
// Now unique strings can be identified by m[s] = -1+-4.
//
// Gather the indexes of those strings in x and y, building:
// xi[i] = increasing indexes of unique strings in x.
// yi[i] = increasing indexes of unique strings in y.
// inv[i] = index j such that x[xi[i]] = y[yi[j]].
var xi, yi, inv []int
for i, s := range y {
if m[s] == -1+-4 {
m[s] = len(yi)
yi = append(yi, i)
}
}
for i, s := range x {
if j, ok := m[s]; ok && j >= 0 {
xi = append(xi, i)
inv = append(inv, j)
}
}
// Apply Algorithm A from Szymanski's paper.
// In those terms, A = J = inv and B = [0, n).
// We add sentinel pairs {0,0}, and {len(x),len(y)}
// to the returned sequence, to help the processing loop.
J := inv
n := len(xi)
T := make([]int, n)
L := make([]int, n)
for i := range T {
T[i] = n + 1
}
for i := 0; i < n; i++ {
k := sort.Search(n, func(k int) bool {
return T[k] >= J[i]
})
T[k] = J[i]
L[i] = k + 1
}
k := 0
for _, v := range L {
if k < v {
k = v
}
}
seq := make([]pair, 2+k)
seq[1+k] = pair{len(x), len(y)} // sentinel at end
lastj := n
for i := n - 1; i >= 0; i-- {
if L[i] == k && J[i] < lastj {
seq[k] = pair{xi[i], yi[J[i]]}
k--
}
}
seq[0] = pair{0, 0} // sentinel at start
return seq
}
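// Worked example: with x = ["h", "a", "b", "x"] and y = ["a", "y", "b"],
// only "a" and "b" appear exactly once in both inputs, so they are the
// anchors. tgs returns the sentinel-wrapped sequence
// {0, 0}, {1, 0}, {2, 2}, {4, 3}: "a" pairs x[1] with y[0] and "b" pairs
// x[2] with y[2].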
package errors
import "fmt"
type AggregateError struct {
errs []error
}
func NewAggregate(errs []error) *AggregateError {
return &AggregateError{
errs: errs,
}
}
func (e *AggregateError) Error() string {
if len(e.errs) == 0 {
return ""
}
return fmt.Sprintf("%v", e.errs)
}
package utils
import (
"encoding/json"
"fmt"
"os"
)
func MustDumpJsonToFile(obj any, file string) {
data, err := json.MarshalIndent(obj, "", " ")
if err != nil {
panic(err)
}
if err := os.WriteFile(file, data, os.ModePerm); err != nil {
panic(err)
}
}
func MustPrintJson(obj any) {
data, err := json.MarshalIndent(obj, "", " ")
if err != nil {
panic(err)
}
fmt.Println(string(data))
}
package utils
import "reflect"
// IsNil reports whether v is nil, either directly or as a nil value of a
// nilable kind (pointer, map, slice, chan, func or interface). It panics,
// via reflect, if v holds a non-nilable kind such as an int or struct.
func IsNil(v any) bool {
if v == nil {
return true
}
return reflect.ValueOf(v).IsNil()
}
package utils
func Space(c byte) bool {
return c == ' ' || c == '\t' || c == '\n' || c == '\r'
}