summaryrefslogtreecommitdiff
path: root/src/tokenizer.go
diff options
context:
space:
mode:
authorJunegunn Choi <junegunn.c@gmail.com>2025-02-09 11:53:35 +0900
committerJunegunn Choi <junegunn.c@gmail.com>2025-02-09 11:53:35 +0900
commit2b584586ed1caf15429625da981575ee35d407b8 (patch)
tree6a4458be6e685f944214dc4c243234b7b8065cd7 /src/tokenizer.go
parenta1994ff0abb48dfe4c6951ad67e837f4c767cc39 (diff)
downloadfzf-2b584586ed1caf15429625da981575ee35d407b8.tar.gz
Add --accept-nth option to transform the output
This option can be used to replace a sed or awk in the post-processing step. ps -ef | fzf --multi --header-lines 1 | awk '{print $2}' ps -ef | fzf --multi --header-lines 1 --accept-nth 2 This may not be a very "Unix-y" thing to do, so I've always felt that fzf shouldn't have such an option, but I've finally changed my mind because: * fzf can be configured with a custom delimiter that is a fixed string or a regular expression. * In such cases, you'd need to repeat the delimiter again in the post-processing step. * Also, tools like awk or sed may interpret a regular expression differently, causing mismatches. You can still use sed, cut, or awk if you prefer. Close #3987 Close #1323
Diffstat (limited to 'src/tokenizer.go')
-rw-r--r--src/tokenizer.go33
1 file changed, 31 insertions, 2 deletions
diff --git a/src/tokenizer.go b/src/tokenizer.go
index f5d1483b..057d7405 100644
--- a/src/tokenizer.go
+++ b/src/tokenizer.go
@@ -211,7 +211,36 @@ func Tokenize(text string, delimiter Delimiter) []Token {
return withPrefixLengths(tokens, 0)
}
-func joinTokens(tokens []Token) string {
+// StripLastDelimiter removes the trailing delimiter and whitespace from the
+// last token, so the joined output (e.g. for --accept-nth) does not end with
+// a dangling separator.
+//
+// NOTE(review): lastToken is a struct copy, so this only works if Token.text
+// is a pointer type that aliases the element still stored in tokens — confirm
+// against the Token declaration.
+func StripLastDelimiter(tokens []Token, delimiter Delimiter) []Token {
+ if len(tokens) == 0 {
+ return tokens
+ }
+
+ lastToken := tokens[len(tokens)-1]
+
+ // No explicit delimiter configured: tokens were split on whitespace, so
+ // only trailing whitespace needs to go.
+ if delimiter.str == nil && delimiter.regex == nil {
+ lastToken.text.TrimTrailingWhitespaces()
+ } else {
+ if delimiter.str != nil {
+ // Fixed-string delimiter: strip it if the token ends with it.
+ lastToken.text.TrimSuffix([]rune(*delimiter.str))
+ } else if delimiter.regex != nil {
+ // Regex delimiter: truncate at the start of the last match.
+ // NOTE(review): FindAllStringIndex yields byte offsets, while
+ // SliceRight presumably takes a rune index — verify these agree
+ // when the token contains multibyte runes.
+ str := lastToken.text.ToString()
+ locs := delimiter.regex.FindAllStringIndex(str, -1)
+ if len(locs) > 0 {
+ lastLoc := locs[len(locs)-1]
+ lastToken.text.SliceRight(lastLoc[0])
+ }
+ }
+ // Whitespace that preceded the stripped delimiter is removed as well.
+ lastToken.text.TrimTrailingWhitespaces()
+ }
+
+ return tokens
+}
+
+// JoinTokens concatenates the tokens into a single string.
+func JoinTokens(tokens []Token) string {
var output bytes.Buffer
for _, token := range tokens {
output.WriteString(token.text.ToString())
@@ -229,7 +258,7 @@ func Transform(tokens []Token, withNth []Range) []Token {
if r.begin == r.end {
idx := r.begin
if idx == rangeEllipsis {
- chars := util.ToChars(stringBytes(joinTokens(tokens)))
+ chars := util.ToChars(stringBytes(JoinTokens(tokens)))
parts = append(parts, &chars)
} else {
if idx < 0 {