From acdf83332bc826fb59a2a1603954bc0a2d5fbf79 Mon Sep 17 00:00:00 2001
From: Maurice Makaay
Date: Sat, 20 Jul 2019 11:50:36 +0000
Subject: [PATCH] Use pointers instead of values, since we're updating the structs.

---
 tokenize/api.go        |  8 ++++----
 tokenize/api_input.go  | 26 +++++++++++++-------------
 tokenize/api_output.go | 31 +++++++++++++++----------------
 3 files changed, 32 insertions(+), 33 deletions(-)

diff --git a/tokenize/api.go b/tokenize/api.go
index af78b9c..e9ac3c1 100644
--- a/tokenize/api.go
+++ b/tokenize/api.go
@@ -72,8 +72,8 @@ type API struct {
 	stackFrames []stackFrame // the stack frames, containing stack level-specific data
 	stackLevel  int          // the current stack level
 	stackFrame  *stackFrame  // the current stack frame
-	Input       Input        // provides input-related functionality
-	Output      Output       // provides output-related functionality
+	Input       *Input       // provides input-related functionality
+	Output      *Output      // provides output-related functionality
 }
 
 type stackFrame struct {
@@ -100,11 +100,11 @@ func NewAPI(input interface{}) *API {
 	api := &API{
 		stackFrames: make([]stackFrame, initialStackDepth),
 	}
-	api.Input = Input{
+	api.Input = &Input{
 		api:    api,
 		reader: read.New(input),
 	}
-	api.Output = Output{
+	api.Output = &Output{
 		api:    api,
 		data:   make([]byte, initialByteStoreLength),
 		tokens: make([]Token, initialTokenStoreLength),
diff --git a/tokenize/api_input.go b/tokenize/api_input.go
index 656e936..aff6cee 100644
--- a/tokenize/api_input.go
+++ b/tokenize/api_input.go
@@ -16,7 +16,7 @@ type Input struct {
 // Reset moves the input cursor back to the beginning for the currently active API child.
 // Aditionally, any output (bytes and tokens) that was emitted from the API child are
 // cleared as well.
-func (i Input) Reset() {
+func (i *Input) Reset() {
 	if i.api.stackLevel == 0 {
 		i.api.stackFrame.column = 0
 		i.api.stackFrame.line = 0
@@ -32,7 +32,7 @@ func (i Input) Reset() {
 	i.api.stackFrame.err = nil
 }
 
-func (i Input) Cursor() string {
+func (i *Input) Cursor() string {
 	if i.api.stackFrame.line == 0 && i.api.stackFrame.column == 0 {
 		return fmt.Sprintf("start of file")
 	}
@@ -44,7 +44,7 @@ func (i Input) Cursor() string {
 // When an error occurs during reading the input, an error will be returned.
 // When an offset is requested that is beyond the length of the available input
 // data, then the error will be io.EOF.
-func (i Input) PeekByte(offset int) (byte, error) {
+func (i *Input) PeekByte(offset int) (byte, error) {
 	return i.reader.ByteAt(i.api.stackFrame.offset + offset)
 }
 
@@ -58,7 +58,7 @@ func (i Input) PeekByte(offset int) (byte, error) {
 //
 // After the call, byte offset 0 for PeekByte() and PeekRune() will point at
 // the first byte after the skipped byte.
-func (i Input) SkipByte(b byte) {
+func (i *Input) SkipByte(b byte) {
 	i.api.stackFrame.moveCursorByByte(b)
 	i.api.stackFrame.offset++
 }
@@ -73,7 +73,7 @@ func (i Input) SkipByte(b byte) {
 //
 // After the call, byte offset 0 for PeekByte() and PeekRune() will point at
 // the first byte after the skipped bytes.
-func (i Input) SkipBytes(bytes ...byte) {
+func (i *Input) SkipBytes(bytes ...byte) {
 	for _, b := range bytes {
 		i.api.stackFrame.moveCursorByByte(b)
 		i.api.stackFrame.offset++
@@ -91,7 +91,7 @@ func (i Input) SkipBytes(bytes ...byte) {
 //
 // After the call, byte offset 0 for PeekByte() and PeekRune() will point at
 // the first byte after the accepted byte.
-func (i Input) AcceptByte(b byte) {
+func (i *Input) AcceptByte(b byte) {
 	curBytesEnd := i.api.stackFrame.bytesEnd
 	maxRequiredBytes := curBytesEnd + 1
 
@@ -119,7 +119,7 @@ func (i Input) AcceptByte(b byte) {
 //
 // After the call, byte offset 0 for PeekByte() and PeekRune() will point at
 // the first byte after the accepted bytes.
-func (i Input) AcceptBytes(bytes ...byte) {
+func (i *Input) AcceptBytes(bytes ...byte) {
 	curBytesEnd := i.api.stackFrame.bytesEnd
 	newBytesEnd := curBytesEnd + len(bytes)
 
@@ -151,7 +151,7 @@ func (i Input) AcceptBytes(bytes ...byte) {
 // When an error occurs during reading the input, an error will be returned.
 // When an offset is requested that is beyond the length of the available input
 // data, then the error will be io.EOF.
-func (i Input) PeekRune(offset int) (rune, int, error) {
+func (i *Input) PeekRune(offset int) (rune, int, error) {
 	return i.reader.RuneAt(i.api.stackFrame.offset + offset)
 }
 
@@ -165,7 +165,7 @@ func (i Input) PeekRune(offset int) (rune, int, error) {
 //
 // After the call, byte offset 0 for PeekByte() and PeekRune() will point at
 // the first byte after the skipped rune.
-func (i Input) SkipRune(r rune) {
+func (i *Input) SkipRune(r rune) {
 	i.api.stackFrame.moveCursorByRune(r)
 	i.api.stackFrame.offset += utf8.RuneLen(r)
 }
@@ -180,7 +180,7 @@ func (i Input) SkipRune(r rune) {
 //
 // After the call, byte offset 0 for PeekByte() and PeekRune() will point at
 // the first byte after the skipped runes.
-func (i Input) SkipRunes(runes ...rune) {
+func (i *Input) SkipRunes(runes ...rune) {
 	for _, r := range runes {
 		i.api.stackFrame.moveCursorByRune(r)
 		i.api.stackFrame.offset += utf8.RuneLen(r)
@@ -198,7 +198,7 @@ func (i Input) SkipRunes(runes ...rune) {
 //
 // After the call, byte offset 0 for PeekByte() and PeekRune() will point at
 // the first byte after the accepted rune.
-func (i Input) AcceptRune(r rune) {
+func (i *Input) AcceptRune(r rune) {
 	curBytesEnd := i.api.stackFrame.bytesEnd
 	maxRequiredBytes := curBytesEnd + utf8.UTFMax
 
@@ -226,7 +226,7 @@ func (i Input) AcceptRune(r rune) {
 //
 // After the call, byte offset 0 for PeekByte() and PeekRune() will point at
 // the first byte after the accepted runes.
-func (i Input) AcceptRunes(runes ...rune) {
+func (i *Input) AcceptRunes(runes ...rune) {
 	runesAsString := string(runes)
 	byteLen := len(runesAsString)
 	curBytesEnd := i.api.stackFrame.bytesEnd
@@ -254,7 +254,7 @@ func (i Input) AcceptRunes(runes ...rune) {
 // Note:
 // When writing your own TokenHandler, you normally won't have to call this
 // method yourself. It is automatically called by parsekit when possible.
-func (i Input) Flush() bool {
+func (i *Input) Flush() bool {
 	if i.api.stackFrame.offset > 0 {
 		i.reader.Flush(i.api.stackFrame.offset)
 		i.api.stackFrame.offset = 0
diff --git a/tokenize/api_output.go b/tokenize/api_output.go
index 70cbeac..33e751e 100644
--- a/tokenize/api_output.go
+++ b/tokenize/api_output.go
@@ -11,31 +11,31 @@ type Output struct {
 	data []byte // accepted data
 }
 
-func (o Output) String() string {
+func (o *Output) String() string {
 	bytes := o.data[o.api.stackFrame.bytesStart:o.api.stackFrame.bytesEnd]
 	return string(bytes)
 }
 
-func (o Output) Runes() []rune {
+func (o *Output) Runes() []rune {
 	bytes := o.data[o.api.stackFrame.bytesStart:o.api.stackFrame.bytesEnd]
 	return []rune(string(bytes))
 }
 
-func (o Output) Rune(offset int) rune {
+func (o *Output) Rune(offset int) rune {
 	r, _ := utf8.DecodeRune(o.data[o.api.stackFrame.bytesStart+offset:])
 	return r
 }
 
-func (o Output) ClearData() {
+func (o *Output) ClearData() {
 	o.api.stackFrame.bytesEnd = o.api.stackFrame.bytesStart
 }
 
-func (o Output) SetBytes(bytes ...byte) {
+func (o *Output) SetBytes(bytes ...byte) {
 	o.ClearData()
 	o.AddBytes(bytes...)
 }
 
-func (o Output) AddBytes(bytes ...byte) {
+func (o *Output) AddBytes(bytes ...byte) {
 	// Grow the runes capacity when needed.
 	newBytesEnd := o.api.stackFrame.bytesEnd + len(bytes)
 	if cap(o.data) < newBytesEnd {
@@ -53,7 +53,7 @@ func (o Output) SetRunes(runes ...rune) {
 	o.AddRunes(runes...)
 }
 
-func (o Output) AddRunes(runes ...rune) {
+func (o *Output) AddRunes(runes ...rune) {
 	// Grow the runes capacity when needed.
 	runesAsString := string(runes)
 	newBytesEnd := o.api.stackFrame.bytesEnd + len(runesAsString)
@@ -67,37 +67,37 @@ func (o Output) AddRunes(runes ...rune) {
 	o.api.stackFrame.bytesEnd = newBytesEnd
 }
 
-func (o Output) AddString(s string) {
+func (o *Output) AddString(s string) {
 	o.AddBytes([]byte(s)...)
 }
 
-func (o Output) SetString(s string) {
+func (o *Output) SetString(s string) {
 	o.ClearData()
 	o.SetBytes([]byte(s)...)
 }
 
-func (o Output) Tokens() []Token {
+func (o *Output) Tokens() []Token {
 	return o.tokens[o.api.stackFrame.tokenStart:o.api.stackFrame.tokenEnd]
 }
 
-func (o Output) Token(offset int) Token {
+func (o *Output) Token(offset int) Token {
 	return o.tokens[o.api.stackFrame.tokenStart+offset]
 }
 
-func (o Output) TokenValue(offset int) interface{} {
+func (o *Output) TokenValue(offset int) interface{} {
 	return o.tokens[o.api.stackFrame.tokenStart+offset].Value
 }
 
-func (o Output) ClearTokens() {
+func (o *Output) ClearTokens() {
 	o.api.stackFrame.tokenEnd = o.api.stackFrame.tokenStart
 }
 
-func (o Output) SetTokens(tokens ...Token) {
+func (o *Output) SetTokens(tokens ...Token) {
 	o.ClearTokens()
 	o.AddTokens(tokens...)
 }
 
-func (o Output) AddTokens(tokens ...Token) {
+func (o *Output) AddTokens(tokens ...Token) {
 	// Grow the tokens capacity when needed.
 	newTokenEnd := o.api.stackFrame.tokenEnd + len(tokens)
 	if cap(o.tokens) < newTokenEnd {
@@ -105,7 +105,6 @@ func (o Output) AddTokens(tokens ...Token) {
 		copy(newTokens, o.tokens)
 		o.tokens = newTokens
 	}
-
 	for offset, t := range tokens {
 		o.tokens[o.api.stackFrame.tokenEnd+offset] = t
 	}
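
Note on why the receiver change matters (editor commentary, not part of the patch): methods such as AddBytes, AddRunes and AddTokens reassign the data and tokens slice fields when they have to grow capacity. With a value receiver those assignments land on a copy of Input/Output and are discarded when the method returns; a pointer receiver makes them stick, which matches the commit subject "we're updating the structs". Below is a minimal, self-contained Go sketch of that failure mode; the buffer type and its addByValue/addByPointer methods are hypothetical names used only for illustration and do not exist in parsekit.

package main

import "fmt"

// buffer mimics the relevant shape of tokenize.Output: a struct whose
// methods may reassign a slice field when its capacity must grow.
type buffer struct {
	data []byte
}

// addByValue operates on a copy of the struct; the new slice header
// assigned to b.data is lost when the method returns.
func (b buffer) addByValue(bytes ...byte) {
	b.data = append(b.data, bytes...)
}

// addByPointer operates on the caller's struct, so the reassignment
// of b.data remains visible to the caller.
func (b *buffer) addByPointer(bytes ...byte) {
	b.data = append(b.data, bytes...)
}

func main() {
	v := buffer{}
	v.addByValue('a', 'b')
	fmt.Println(len(v.data)) // 0: the update happened on a copy

	p := &buffer{}
	p.addByPointer('a', 'b')
	fmt.Println(len(p.data)) // 2: the update persisted
}

Storing *Input and *Output in API (rather than values) follows the same reasoning: every caller then shares and mutates the single instance owned by the API.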