chroma package - github.com/alecthomas/chroma/v2 - Go Packages

Package chroma takes source code and other structured text and converts it into syntax-highlighted HTML, ANSI-coloured text, etc.

Chroma is based heavily on Pygments, and includes translators for Pygments lexers and styles.

For more information, go here: https://github.com/alecthomas/chroma

View Source

const (
	Whitespace = TextWhitespace

	Date = LiteralDate

	String          = LiteralString
	StringAffix     = LiteralStringAffix
	StringBacktick  = LiteralStringBacktick
	StringChar      = LiteralStringChar
	StringDelimiter = LiteralStringDelimiter
	StringDoc       = LiteralStringDoc
	StringDouble    = LiteralStringDouble
	StringEscape    = LiteralStringEscape
	StringHeredoc   = LiteralStringHeredoc
	StringInterpol  = LiteralStringInterpol
	StringOther     = LiteralStringOther
	StringRegex     = LiteralStringRegex
	StringSingle    = LiteralStringSingle
	StringSymbol    = LiteralStringSymbol

	Number            = LiteralNumber
	NumberBin         = LiteralNumberBin
	NumberFloat       = LiteralNumberFloat
	NumberHex         = LiteralNumberHex
	NumberInteger     = LiteralNumberInteger
	NumberIntegerLong = LiteralNumberIntegerLong
	NumberOct         = LiteralNumberOct
)

Aliases.

View Source

var ANSI2RGB = map[string]string{
	"#ansiblack":     "000000",
	"#ansidarkred":   "7f0000",
	"#ansidarkgreen": "007f00",
	"#ansibrown":     "7f7fe0",
	"#ansidarkblue":  "00007f",
	"#ansipurple":    "7f007f",
	"#ansiteal":      "007f7f",
	"#ansilightgray": "e5e5e5",

	"#ansidarkgray":  "555555",
	"#ansired":       "ff0000",
	"#ansigreen":     "00ff00",
	"#ansiyellow":    "ffff00",
	"#ansiblue":      "0000ff",
	"#ansifuchsia":   "ff00ff",
	"#ansiturquoise": "00ffff",
	"#ansiwhite":     "ffffff",

	"#black":     "000000",
	"#darkred":   "7f0000",
	"#darkgreen": "007f00",
	"#brown":     "7f7fe0",
	"#darkblue":  "00007f",
	"#purple":    "7f007f",
	"#teal":      "007f7f",
	"#lightgray": "e5e5e5",

	"#darkgray":  "555555",
	"#red":       "ff0000",
	"#green":     "00ff00",
	"#yellow":    "ffff00",
	"#blue":      "0000ff",
	"#fuchsia":   "ff00ff",
	"#turquoise": "00ffff",
	"#white":     "ffffff",
}

ANSI2RGB maps ANSI colour names, as supported by Chroma, to hex RGB values.

Serialisation of Chroma rules to XML. The format is:

<rules>
  <state name="$STATE">
    <rule [pattern="$PATTERN"]>
      [<$EMITTER ...>]
      [<$MUTATOR ...>]
    </rule>
  </state>
</rules>

eg. Include("String") would become:

<rule>
  <include state="String" />
</rule>

(Equivalent JSON serialisation: [null, null, {"kind": "include", "state": "String"}])

eg. Rule{`\d+`, Text, nil} would become:

<rule pattern="\\d+">
  <token type="Text"/>
</rule>

eg. Rule{`"`, String, Push("String")}

<rule pattern="\"">
  <token type="String" />
  <push state="String" />
</rule>

eg. Rule{`(\w+)(\n)`, ByGroups(Keyword, Whitespace), nil},

<rule pattern="(\\w+)(\\n)">
  <bygroups token="Keyword" token="Whitespace" />
</rule>

View Source

var (
	StandardTypes = map[TokenType]string{
		Background:       "bg",
		PreWrapper:       "chroma",
		Line:             "line",
		LineNumbers:      "ln",
		LineNumbersTable: "lnt",
		LineHighlight:    "hl",
		LineTable:        "lntable",
		LineTableTD:      "lntd",
		LineLink:         "lnlinks",
		CodeLine:         "cl",
		Text:             "",
		Whitespace:       "w",
		Error:            "err",
		Other:            "x",

		Keyword:            "k",
		KeywordConstant:    "kc",
		KeywordDeclaration: "kd",
		KeywordNamespace:   "kn",
		KeywordPseudo:      "kp",
		KeywordReserved:    "kr",
		KeywordType:        "kt",

		Name:                 "n",
		NameAttribute:        "na",
		NameBuiltin:          "nb",
		NameBuiltinPseudo:    "bp",
		NameClass:            "nc",
		NameConstant:         "no",
		NameDecorator:        "nd",
		NameEntity:           "ni",
		NameException:        "ne",
		NameFunction:         "nf",
		NameFunctionMagic:    "fm",
		NameProperty:         "py",
		NameLabel:            "nl",
		NameNamespace:        "nn",
		NameOther:            "nx",
		NameTag:              "nt",
		NameVariable:         "nv",
		NameVariableClass:    "vc",
		NameVariableGlobal:   "vg",
		NameVariableInstance: "vi",
		NameVariableMagic:    "vm",

		Literal:     "l",
		LiteralDate: "ld",

		String:          "s",
		StringAffix:     "sa",
		StringBacktick:  "sb",
		StringChar:      "sc",
		StringDelimiter: "dl",
		StringDoc:       "sd",
		StringDouble:    "s2",
		StringEscape:    "se",
		StringHeredoc:   "sh",
		StringInterpol:  "si",
		StringOther:     "sx",
		StringRegex:     "sr",
		StringSingle:    "s1",
		StringSymbol:    "ss",

		Number:            "m",
		NumberBin:         "mb",
		NumberFloat:       "mf",
		NumberHex:         "mh",
		NumberInteger:     "mi",
		NumberIntegerLong: "il",
		NumberOct:         "mo",

		Operator:     "o",
		OperatorWord: "ow",

		Punctuation: "p",

		Comment:            "c",
		CommentHashbang:    "ch",
		CommentMultiline:   "cm",
		CommentPreproc:     "cp",
		CommentPreprocFile: "cpf",
		CommentSingle:      "c1",
		CommentSpecial:     "cs",

		Generic:           "g",
		GenericDeleted:    "gd",
		GenericEmph:       "ge",
		GenericError:      "gr",
		GenericHeading:    "gh",
		GenericInserted:   "gi",
		GenericOutput:     "go",
		GenericPrompt:     "gp",
		GenericStrong:     "gs",
		GenericSubheading: "gu",
		GenericTraceback:  "gt",
		GenericUnderline:  "gl",
	}
)

Marshal a RegexLexer to XML.

func SplitTokensIntoLines(tokens []Token) (out [][]Token)

SplitTokensIntoLines splits tokens containing newlines in two.

func Stringify(tokens ...Token) string

Stringify returns the raw string for a set of tokens.

func TokenTypeStrings() []string

TokenTypeStrings returns a slice of all String values of the enum

Words creates a regex that matches any of the given literal words.

type AnalyseConfig struct {
	Regexes []RegexConfig `xml:"regex,omitempty"`
	
	First bool `xml:"first,attr"`
}

AnalyseConfig defines the list of regexes analysers.

Analyser determines how appropriate this lexer is for the given text.

Colour represents an RGB colour.

func MustParseColour(colour string) Colour

MustParseColour is like ParseColour except it panics if the colour is invalid.

Will panic if colour is in an invalid format.

func NewColour(r, g, b uint8) Colour

NewColour creates a Colour directly from RGB values.

ParseColour in the forms #rgb, #rrggbb, #ansi<colour>, or #<colour>. Will return an "unset" colour if invalid.

Blue component of colour.

func (c Colour) BrightenOrDarken(factor float64) Colour

BrightenOrDarken brightens a colour if it is < 0.5 brightness or darkens if > 0.5 brightness.

Brightness of the colour (roughly) in the range 0.0 to 1.0.

func (c Colour) ClampBrightness(min, max float64) Colour

ClampBrightness returns a copy of this colour with its brightness adjusted such that it falls within the range [min, max] (or very close to it due to rounding errors). The supplied values use the same [0.0, 1.0] range as Brightness.

Distance between this colour and another.

This uses the approach described here (https://www.compuphase.com/cmetric.htm). This is not as accurate as LAB, et. al. but is *vastly* simpler and sufficient for our needs.

Green component of colour.

IsSet returns true if the colour is set.

Colours is an orderable set of colours.

func (c Colours) Len() int
func (c Colours) Swap(i, j int)

A CompiledRule is a Rule with a pre-compiled regex.

Note that regular expressions are lazily compiled on first use of the lexer.

CompiledRules is a map of rule name to sequence of compiled rules in that rule.

type Config struct {
	
	Name string `xml:"name,omitempty"`

	
	Aliases []string `xml:"alias,omitempty"`

	
	Filenames []string `xml:"filename,omitempty"`

	
	AliasFilenames []string `xml:"alias_filename,omitempty"`

	
	MimeTypes []string `xml:"mime_type,omitempty"`

	
	CaseInsensitive bool `xml:"case_insensitive,omitempty"`

	
	DotAll bool `xml:"dot_all,omitempty"`

	
	
	
	NotMultiline bool `xml:"not_multiline,omitempty"`

	
	
	EnsureNL bool `xml:"ensure_nl,omitempty"`

	
	
	
	Priority float32 `xml:"priority,omitempty"`

	
	
	
	
	
	Analyse *AnalyseConfig `xml:"analyse,omitempty"`
}

Config for a lexer.

type Emitter interface {
	
	Emit(groups []string, state *LexerState) Iterator
}

An Emitter takes group matches and returns tokens.

func ByGroupNames(emitters map[string]Emitter) Emitter

ByGroupNames emits a token for each named matching group in the rule's regex.

func ByGroups(emitters ...Emitter) Emitter

ByGroups emits a token for each matching group in the rule's regex.

Using returns an Emitter that uses a given Lexer reference for parsing and emitting.

The referenced lexer must be stored in the same LexerRegistry.

func UsingByGroup(sublexerNameGroup, codeGroup int, emitters ...Emitter) Emitter

UsingByGroup emits tokens for the matched groups in the regex using a sublexer. Used when lexing code blocks where the name of a sublexer is contained within the block, for example on a Markdown text block or SQL language block.

An attempt to load the sublexer will be made using the captured value from the text of the matched sublexerNameGroup. If a sublexer matching the sublexerNameGroup is available, then tokens for the matched codeGroup will be emitted using the sublexer. Otherwise, if no sublexer is available, then tokens will be emitted from the passed emitter.

Example:

var Markdown = internal.Register(MustNewLexer(
	&Config{
		Name:      "markdown",
		Aliases:   []string{"md", "mkd"},
		Filenames: []string{"*.md", "*.mkd", "*.markdown"},
		MimeTypes: []string{"text/x-markdown"},
	},
	Rules{
		"root": {
			{"^(```)(\\w+)(\\n)([\\w\\W]*?)(^```$)",
				UsingByGroup(
					2, 4,
					String, String, String, Text, String,
				),
				nil,
			},
		},
	},
))

See the lexers/markdown.go for the complete example.

Note: panics if the number of emitters does not equal the number of matched groups in the regex.

func UsingLexer(lexer Lexer) Emitter

UsingLexer returns an Emitter that uses a given Lexer for parsing and emitting.

This Emitter is not serialisable.

UsingSelf is like Using, but uses the current Lexer.

type EmitterFunc func(groups []string, state *LexerState) Iterator

EmitterFunc is a function that is an Emitter.

type Formatter interface {
	
	
	
	Format(w io.Writer, style *Style, iterator Iterator) error
}

A Formatter for Chroma lexers.

func RecoveringFormatter(formatter Formatter) Formatter

RecoveringFormatter wraps a formatter with panic recovery.

A FormatterFunc is a Formatter implemented as a function.

Guards against iterator panics.

type Iterator func() Token

An Iterator across tokens.

EOF will be returned at the end of the Token stream.

If an error occurs within an Iterator, it may propagate this in a panic. Formatters should recover.

func Concaterator(iterators ...Iterator) Iterator

Concaterator concatenates tokens from a series of iterators.

func Literator(tokens ...Token) Iterator

Literator converts a sequence of literal Tokens into an Iterator.

func (i Iterator) Stdlib() func(yield func(Token) bool)

Stdlib converts a Chroma iterator to a Go 1.23-compatible iterator.

func (i Iterator) Tokens() []Token

Tokens consumes all tokens from the iterator and returns them as a slice.

A Lexer for tokenising source code.

func Coalesce(lexer Lexer) Lexer

Coalesce is a Lexer interceptor that collapses runs of common types into a single token.

func DelegatingLexer(root Lexer, language Lexer) Lexer

DelegatingLexer combines two lexers to handle the common case of a language embedded inside another, such as PHP inside HTML or PHP inside plain text.

It takes two lexers as arguments: a root lexer and a language lexer. First everything is scanned using the language lexer, which must return "Other" for unrecognised tokens. Then all "Other" tokens are lexed using the root lexer. Finally, these two sets of tokens are merged.

The lexers from the template lexer package use this base lexer.

func RemappingLexer(lexer Lexer, mapper func(Token) []Token) Lexer

RemappingLexer remaps a token to a set of, potentially empty, tokens.

func TypeRemappingLexer(lexer Lexer, mapping TypeMapping) Lexer

TypeRemappingLexer remaps types of tokens coming from a parent Lexer.

eg. Map "defvaralias" tokens of type NameVariable to NameFunction:

mapping := TypeMapping{
	{NameVariable, NameFunction, []string{"defvaralias"}},
}
lexer = TypeRemappingLexer(lexer, mapping)
type LexerMutator interface {
	
	
	
	MutateLexer(rules CompiledRules, state string, rule int) error
}

A LexerMutator is an additional interface that a Mutator can implement to modify the lexer when it is compiled.

type LexerRegistry struct {
	Lexers Lexers
	
}

LexerRegistry is a registry of Lexers.

func NewLexerRegistry() *LexerRegistry

NewLexerRegistry creates a new LexerRegistry of Lexers.

Aliases of all the lexers, skipping lexers that do not have any aliases, or showing their name instead.

Analyse text content and return the "best" lexer.

Get a Lexer by name, alias or file extension.

Match returns the first lexer matching filename.

Note that this iterates over all file patterns in all lexers, so is not fast.

func (l *LexerRegistry) MatchMimeType(mimeType string) Lexer

MatchMimeType attempts to find a lexer for the given MIME type.

Names of all lexers, optionally including aliases.

func (l *LexerRegistry) Register(lexer Lexer) Lexer

Register a Lexer with the LexerRegistry. If the lexer is already registered it will be replaced.

LexerState contains the state for a single lex.

func (l *LexerState) Get(key interface{}) interface{}

Get mutator context.

func (l *LexerState) Iterator() Token

Iterator returns the next Token from the lexer.

func (l *LexerState) Set(key interface{}, value interface{})

Set mutator context.

Lexers is a slice of lexers sortable by name.

func (l Lexers) Len() int
func (l Lexers) Swap(i, j int)
type Mutator interface {
	
	Mutate(state *LexerState) error
}

A Mutator modifies the behaviour of the lexer.

Combined creates a new anonymous state from the given states, and pushes that state.

func Mutators(modifiers ...Mutator) Mutator

Mutators applies a set of Mutators in order.

Pop state from the stack when rule matches.

Push states onto the stack.

type MutatorFunc func(state *LexerState) error

A MutatorFunc is a Mutator that mutates the lexer state machine as it is processing.

type PrioritisedLexers []Lexer

PrioritisedLexers is a slice of lexers sortable by priority.

func (l PrioritisedLexers) Swap(i, j int)
type RegexConfig struct {
	Pattern string  `xml:"pattern,attr"`
	Score   float32 `xml:"score,attr"`
}

RegexConfig defines a single regex pattern and its score in case of match.

type RegexLexer struct {
	
}

RegexLexer is the default lexer implementation used in Chroma.

func MustNewLexer(config *Config, rules func() Rules) *RegexLexer

MustNewLexer creates a new Lexer with deferred rules generation or panics.

MustNewXMLLexer constructs a new RegexLexer from an XML file or panics.

func NewLexer(config *Config, rulesFunc func() Rules) (*RegexLexer, error)

NewLexer creates a new regex-based Lexer.

"rules" is a state machine transition map. Each key is a state. Values are sets of rules that match input, optionally modify lexer state, and output tokens.

NewXMLLexer creates a new RegexLexer from a serialised RegexLexer.

Unmarshal a RegexLexer from XML.

AnalyseText scores how likely a fragment of text is to match this lexer, between 0.0 and 1.0.

func (r *RegexLexer) Config() *Config

Config returns the Config for this Lexer.

func (r *RegexLexer) MustRules() Rules

MustRules is like Rules() but will panic on error.

SetAnalyser sets the analyser function used to perform content inspection.

func (r *RegexLexer) SetConfig(config *Config) *RegexLexer

SetConfig replaces the Config for this Lexer.

func (r *RegexLexer) SetRegistry(registry *LexerRegistry) Lexer

SetRegistry the lexer will use to lookup other lexers if necessary.

func (r *RegexLexer) SetTracing(trace bool)

SetTracing enables debug tracing.

This complies with the TracingLexer interface.

Tokenise text using lexer, returning an iterator.

func (r *RegexLexer) Trace(trace bool) *RegexLexer

Trace enables debug tracing.

Deprecated: Use SetTracing instead.

type Rule struct {
	Pattern string
	Type    Emitter
	Mutator Mutator
}

A Rule is the fundamental matching unit of the Regex lexer state machine.

func Default(mutators ...Mutator) Rule

Default returns a Rule that applies a set of Mutators.

Rules maps from state to a sequence of Rules.

func (r Rules) Clone() Rules

Clone returns a clone of the Rules.

func (r Rules) Merge(rules Rules) Rules

Merge creates a clone of "r" then merges "rules" into the clone.

func (r Rules) Rename(oldRule, newRule string) Rules

Rename clones rules, then renames a rule in the clone.

type SerialisableEmitter interface {
	Emitter
	EmitterKind() string
}

SerialisableEmitter is an Emitter that can be serialised and deserialised to/from JSON.

type SerialisableMutator interface {
	Mutator
	MutatorKind() string
}

SerialisableMutator is a Mutator that can be serialised and deserialised.

type Style struct {
	Name string
	
}

A Style definition.

See http://pygments.org/docs/styles/ for details. Semantics are intended to be identical.

func MustNewStyle(name string, entries StyleEntries) *Style

MustNewStyle creates a new style or panics.

MustNewXMLStyle is like NewXMLStyle but panics on error.

NewStyle creates a new style definition.

NewXMLStyle parses an XML style definition.

func (s *Style) Builder() *StyleBuilder

Builder creates a mutable builder from this Style.

The builder can then be safely modified. This is a cheap operation.

func (s *Style) Get(ttype TokenType) StyleEntry

Get a style entry. Will try sub-category or category if an exact match is not found, and finally return the Background.

func (s *Style) Has(ttype TokenType) bool

Has checks if an exact style entry match exists for a token type.

This is distinct from Get() which will merge parent tokens.

func (s *Style) Types() []TokenType

Types that are styled.

type StyleBuilder struct {
	
}

A StyleBuilder is a mutable structure for building styles.

Once built, a Style is immutable.

func (s *StyleBuilder) AddAll(entries StyleEntries) *StyleBuilder
func (s *StyleBuilder) AddEntry(ttype TokenType, entry StyleEntry) *StyleBuilder
func (s *StyleBuilder) Get(ttype TokenType) StyleEntry
func (s *StyleBuilder) Transform(transform func(StyleEntry) StyleEntry) *StyleBuilder

Transform passes each style entry currently defined in the builder to the supplied function and saves the returned value. This can be used to adjust a style's colours; see Colour's ClampBrightness function, for example.

StyleEntries mapping TokenType to colour definition.

type StyleEntry struct {
	
	Colour     Colour
	Background Colour
	Border     Colour

	Bold      Trilean
	Italic    Trilean
	Underline Trilean
	NoInherit bool
}

A StyleEntry in the Style map.

func MustParseStyleEntry(entry string) StyleEntry

MustParseStyleEntry parses a Pygments style entry or panics.

ParseStyleEntry parses a Pygments style entry.

func (s StyleEntry) Inherit(ancestors ...StyleEntry) StyleEntry

Inherit styles from ancestors.

Ancestors should be provided from oldest to newest.

func (s StyleEntry) Sub(e StyleEntry) StyleEntry

Sub subtracts e from s where elements match.

type Token struct {
	Type  TokenType `json:"type"`
	Value string    `json:"value"`
}

Token output to formatter.

EOF is returned by lexers at the end of input.

Tokenise text using lexer, returning tokens as a slice.

func (t *Token) Clone() Token

Clone returns a clone of the Token.

TokenType is the type of token to highlight.

It is also an Emitter, emitting a single token of itself

const (
	
	Background TokenType = -1 - iota
	
	PreWrapper
	
	Line
	
	LineNumbers
	
	LineNumbersTable
	
	LineHighlight
	
	LineTable
	
	LineTableTD
	
	LineLink
	
	CodeLine
	
	Error
	
	Other
	
	None
	
	Ignore
	
	EOFType TokenType = 0
)

Meta token types.

const (
	Keyword TokenType = 1000 + iota
	KeywordConstant
	KeywordDeclaration
	KeywordNamespace
	KeywordPseudo
	KeywordReserved
	KeywordType
)

Keywords.

const (
	Name TokenType = 2000 + iota
	NameAttribute
	NameClass
	NameConstant
	NameDecorator
	NameEntity
	NameException
	NameKeyword
	NameLabel
	NameNamespace
	NameOperator
	NameOther
	NamePseudo
	NameProperty
	NameTag
)

Names.

const (
	NameBuiltin TokenType = 2100 + iota
	NameBuiltinPseudo
)

Builtin names.

const (
	NameVariable TokenType = 2200 + iota
	NameVariableAnonymous
	NameVariableClass
	NameVariableGlobal
	NameVariableInstance
	NameVariableMagic
)

Variable names.

const (
	NameFunction TokenType = 2300 + iota
	NameFunctionMagic
)

Function names.

const (
	Literal TokenType = 3000 + iota
	LiteralDate
	LiteralOther
)

Literals.

const (
	LiteralString TokenType = 3100 + iota
	LiteralStringAffix
	LiteralStringAtom
	LiteralStringBacktick
	LiteralStringBoolean
	LiteralStringChar
	LiteralStringDelimiter
	LiteralStringDoc
	LiteralStringDouble
	LiteralStringEscape
	LiteralStringHeredoc
	LiteralStringInterpol
	LiteralStringName
	LiteralStringOther
	LiteralStringRegex
	LiteralStringSingle
	LiteralStringSymbol
)

Strings.

const (
	LiteralNumber TokenType = 3200 + iota
	LiteralNumberBin
	LiteralNumberFloat
	LiteralNumberHex
	LiteralNumberInteger
	LiteralNumberIntegerLong
	LiteralNumberOct
	LiteralNumberByte
)

Literals.

const (
	Operator TokenType = 4000 + iota
	OperatorWord
)

Operators.

const (
	Generic TokenType = 7000 + iota
	GenericDeleted
	GenericEmph
	GenericError
	GenericHeading
	GenericInserted
	GenericOutput
	GenericPrompt
	GenericStrong
	GenericSubheading
	GenericTraceback
	GenericUnderline
)

Generic tokens.

const (
	Text TokenType = 8000 + iota
	TextWhitespace
	TextSymbol
	TextPunctuation
)

Text.

const (
	Punctuation TokenType = 5000 + iota
)

Punctuation.

TokenTypeString retrieves an enum value from the enum constants string name. Throws an error if the param is not part of the enum.

func TokenTypeValues() []TokenType

TokenTypeValues returns all values of the enum

func (t TokenType) Category() TokenType
func (t TokenType) InCategory(other TokenType) bool
func (t TokenType) InSubCategory(other TokenType) bool
func (i TokenType) IsATokenType() bool

IsATokenType returns "true" if the value is listed in the enum definition. "false" otherwise

MarshalText implements the encoding.TextMarshaler interface for TokenType

func (t TokenType) Parent() TokenType
func (t TokenType) SubCategory() TokenType

UnmarshalText implements the encoding.TextUnmarshaler interface for TokenType

type TokeniseOptions struct {
	
	State string
	
	Nested bool

	
	
	EnsureLF bool
}

TokeniseOptions contains options for tokenisers.

type Trace struct {
	Lexer   string  `json:"lexer"`
	State   string  `json:"state"`
	Rule    int     `json:"rule"`
	Pattern string  `json:"pattern"`
	Pos     int     `json:"pos"`
	Length  int     `json:"length"`
	Elapsed float64 `json:"elapsedMs"` 
}

Trace is the trace of a tokenisation process.

type TracingLexer interface {
	Lexer
	SetTracing(enable bool)
}

TracingLexer is a Lexer that can trace its tokenisation process.

Trilean value for StyleEntry value inheritance.

const (
	Pass Trilean = iota
	Yes
	No
)

Trilean states.

Prefix returns s with "no" as a prefix if Trilean is no.

type TypeMapping []struct {
	From, To TokenType
	Words    []string
}

TypeMapping defines type maps for the TypeRemappingLexer.

type ValidatingEmitter interface {
	Emitter
	ValidateEmitter(rule *CompiledRule) error
}

ValidatingEmitter is an Emitter that can validate against a compiled rule.