Documentation ¶
Index ¶
- func FindRawTokenType(value, operators, separators string) text.TokenType
- func JoinMultiCharSymbols(l *list.List, multiChar string) *list.List
- func JoinQuote(l *list.List) *list.List
- func RegroupSymbols(l *list.List, symbols string) []string
- func SplitKeepSeparators(s, sep string) *list.List
- type SourceReader
- func (source *SourceReader) FilterLine(line *text.SourceLine, options TokenMatchingOptions) []text.Token
- func (source *SourceReader) HasNext() bool
- func (source *SourceReader) LineNumber() uint
- func (source *SourceReader) NextLine() *list.List
- func (source *SourceReader) NormalizeLine(line *list.List, lineNumber uint, symbols string) *text.SourceLine
- func (source *SourceReader) SourceName() string
- type TokenMatchingOptions
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func FindRawTokenType ¶
func JoinMultiCharSymbols ¶
JoinMultiCharSymbols joins symbols consisting of more than one character, such as && or >=. The maximum symbol length is 2. Each symbol is padded with a space to the next even position, so multiChar has the format "<symbol padded><symbol padded>...", for example: "+ - ++ -- & |".
func RegroupSymbols ¶
RegroupSymbols regroups symbols that were split by NextLine(). The symbols string has the format "<symbol><space>...", for example: "+ - ++ -- & |".
func SplitKeepSeparators ¶
SplitKeepSeparators splits a string on single-character separators, like StringTokenizer in Java, and returns a list containing both the tokens and the delimiters.
Types ¶
type SourceReader ¶
type SourceReader struct {
// contains filtered or unexported fields
}
SourceReader reads the source file/string and splits each line into raw tokens.
func NewSourceReader ¶
func NewSourceReader(reader io.Reader, separators, sourceName string) *SourceReader
NewSourceReader creates a SourceReader from a reader and a separators string.
func NewSourceReaderFromFile ¶
func NewSourceReaderFromFile(filePath, separators string) *SourceReader
NewSourceReaderFromFile creates a SourceReader from a file path and a separators string.
func (*SourceReader) FilterLine ¶
func (source *SourceReader) FilterLine(line *text.SourceLine, options TokenMatchingOptions) []text.Token
FilterLine removes single-line comments, filters out whitespace, and returns a slice of tokens.
func (*SourceReader) HasNext ¶
func (source *SourceReader) HasNext() bool
HasNext reports whether there are more lines to read (i.e., the end of the file has not yet been reached). NOTE(review): the original doc said the opposite ("true if the end of file is reached"), which contradicts the method name — confirm against the implementation.
func (*SourceReader) LineNumber ¶
func (source *SourceReader) LineNumber() uint
LineNumber returns the current line number.
func (*SourceReader) NextLine ¶
func (source *SourceReader) NextLine() *list.List
NextLine reads the next line, splits it into raw tokens, and returns them as a list.
func (*SourceReader) NormalizeLine ¶
func (source *SourceReader) NormalizeLine(line *list.List, lineNumber uint, symbols string) *text.SourceLine
NormalizeLine processes the raw tokens in line, regrouping quoted strings and multi-character operators.
func (*SourceReader) SourceName ¶
func (source *SourceReader) SourceName() string
SourceName returns the source name.
type TokenMatchingOptions ¶
type TokenMatchingOptions struct {
// contains filtered or unexported fields
}
TokenMatchingOptions contains definition of delimiters and operators for the lexer
func NewMatchingOptions ¶
func NewMatchingOptions(operators, separators, multiCharSymbols, lineCommentChar string) TokenMatchingOptions
func (TokenMatchingOptions) GetAllSeparators ¶
func (opt TokenMatchingOptions) GetAllSeparators() string
func (TokenMatchingOptions) GetMultiCharSymbols ¶
func (opt TokenMatchingOptions) GetMultiCharSymbols() string
func (TokenMatchingOptions) GetOperators ¶
func (opt TokenMatchingOptions) GetOperators() string
func (TokenMatchingOptions) LineToTokens ¶
func (opt TokenMatchingOptions) LineToTokens(l *text.SourceLine, lineNumber uint, fileName string) []text.Token
LineToTokens converts the line to a slice of tokens, skipping whitespace and comments. TODO(review): investigate why the fileName parameter was used.