package csvx import ( "encoding/csv" "os" ) // Append appends the given record to the end of the file specified by the filePath parameter. The record should be // a slice of strings, where each string represents a field of the record. If the file does not exist, it will be created. // If an error occurs during the operation, an error value will be returned. func Append(filePath string, record []string) error { // Check if the file exists if _, err := os.Stat(filePath); os.IsNotExist(err) { // Create the file if it doesn't exist _, err = os.Create(filePath) if err != nil { return err } } // Open the CSV file for writing and appending file, err := os.OpenFile(filePath, os.O_WRONLY|os.O_APPEND, 0644) if err != nil { return err } defer func(file *os.File) { _ = file.Close() }(file) // Create a new CSV writer and pass in the opened file writer := csv.NewWriter(file) defer writer.Flush() // Write the new row to the CSV file err = writer.Write(record) if err != nil { return err } return nil }
package csvx import ( "reflect" "strconv" "strings" ) const bom = "\ufeff" // RemoveDoubleQuote remove double quote (") and clear unicode from text func RemoveDoubleQuote(text string) string { text = ClearUnicode(text) if len(text) >= 2 && text[0] == '"' && text[len(text)-1] == '"' { text = text[1 : len(text)-1] } return text } // ClearUnicode clear unicode from text func ClearUnicode(text string) string { result := strings.TrimPrefix(text, bom) return strings.TrimSpace(result) } // IsFloat returns true if the given reflect.Type is a float32 or float64 type, and false otherwise. // This function can be used to check whether a given type is a floating-point type, which may be useful // for type assertions and other operations that require type checking. If the given type is not a valid // float type, this function will return false. func IsFloat(t reflect.Type) bool { if t.Kind() == reflect.Float32 || t == reflect.TypeOf(float64(0)) { return true } return false } // IsPointer checks whether the given interface is a pointer. // Returns true if the input is a pointer type, otherwise false. func IsPointer(t reflect.Type) bool { return t.Kind() == reflect.Pointer || t.Kind() == reflect.Ptr } // F64ToString converts the given float64 value to a string representation. // The resulting string will be formatted as a decimal number with up to 10 decimal places. // This function can be used to convert floating-point values to string values, which may be useful // for printing or other output operations. If the given value is NaN or infinite, the resulting string will reflect this. // If the given value is not representable as a finite decimal number, this function may return an inaccurate or nonsensical result. func F64ToString(num float64) string { return strconv.FormatFloat(num, 'f', -1, 64) }
package csvx import ( "bytes" "encoding/csv" "fmt" "reflect" "strconv" "strings" ) // Utf8BOM represents the Byte Order Mark (BOM) for UTF-8 encoding. // It is used to signal UTF-8 encoding in text files for better compatibility with tools like Excel. const Utf8BOM = "\uFEFF" // Format formats a slice of strings as a single, comma-separated string. Each element in the slice will be separated // by a comma and a space. This function can be used to generate formatted output for CSV files or other data formats // that use comma-separated values. If the input slice is empty, this function will return an empty string. func Format(cell []string) string { return strings.Join(cell, ",") } // Convert array struct to csv format // Struct supported // // type MyStruct struct { // Name string `json:"name" header:"Name" no:"2"` // ID int `json:"id" header:"ID" no:"1"` // } // // m := []MyStruct{{ID: 1, Name: "N1"}, {ID: 2, Name: "N2"}} // csv := csvx.Convert[MyStruct](m) // // Result: // // "ID","Name" // "1","N1" // "2","N2" func Convert[T any](data []T, ignoreDoubleQuote ...bool) string { size := len(data) if size > 0 { // Config format value valueFormatCore := "%v" valueFormat := "\"%v\"" if len(ignoreDoubleQuote) > 0 { valueFormat = valueFormatCore } // Initialize the element var headers []string rows := make([][]string, size) // Mapping sheets := []string{} for r, d := range data { el := reflect.ValueOf(&d).Elem() colsRaw := el.NumField() cols := 0 for c := 0; c < colsRaw; c++ { _, fOk := headerLookup[T](d, c) _, iOk := noLookup[T](d, c) if !fOk || !iOk { continue } cols++ } if headers == nil { headers = make([]string, cols) } if len(rows[r]) == 0 { rows[r] = make([]string, cols) } for c := 0; c < colsRaw; c++ { value := el.Field(c) field, fOk := headerLookup[T](d, c) index, iOk := noLookup[T](d, c) if !fOk || !iOk { continue } if i, err := strconv.Atoi(index); err == nil { if r == 0 { headers[i-1] = fmt.Sprintf(valueFormat, field) } if IsFloat(value.Type()) { rows[r][i-1] = fmt.Sprintf(valueFormat, F64ToString(value.Float())) } else { nValue := "" if IsPointer(value.Type()) { if value.Elem().IsValid() { nValue = RemoveDoubleQuote(fmt.Sprintf(valueFormatCore, value.Elem())) } } else { nValue = RemoveDoubleQuote(fmt.Sprintf(valueFormatCore, value)) } rows[r][i-1] = fmt.Sprintf(valueFormat, nValue) } } } // Convert array to csv format if len(sheets) == 0 { sheets = append(sheets, Format(headers)) } sheets = append(sheets, Format(rows[r])) } // Add enter end line result := strings.Join(sheets, "\n") return result } return "" } // ManualConvert performs a manual conversion of the input data. // It applies the specified rules or transformations to achieve the desired output. func ManualConvert[T any](data []T, headers []string, onRecord func(data T) []string) string { size := len(data) if size == 0 { return "" } var buffer bytes.Buffer w := csv.NewWriter(&buffer) _ = w.Write(headers) for _, d := range data { row := onRecord(d) _ = w.Write(row) } w.Flush() return fmt.Sprintf("%s%s", Utf8BOM, buffer.String()) } // TryConvert attempts to convert the input data to the specified format. // It handles errors gracefully and returns the converted result along with an error (if any). 
// TryConvert converts a slice of structs to CSV format, driven by the `header` and `no`
// struct tags. Fields whose tags are missing or whose `no` tag is not numeric are skipped.
// The result is a string prefixed with the UTF-8 BOM.
func TryConvert[T any](data []T, ignoreDoubleQuote ...bool) string {
	if len(data) == 0 {
		return ""
	}

	// Configure the value format: quoted by default, raw when requested
	valueFormatCore := "%v"
	valueFormat := "\"%v\""
	if len(ignoreDoubleQuote) > 0 {
		valueFormat = valueFormatCore
	}

	// Use reflection to inspect the struct type once, building lookup tables
	// from the column index to the field index and header name
	t := reflect.TypeOf(data[0])
	cols := 0
	numField := t.NumField()
	hmap := make(map[int]string)
	nmap := make(map[int]int)
	for i := 0; i < numField; i++ {
		header, hOk := headerLookup[T](data[0], i)
		noStr, nOk := noLookup[T](data[0], i)
		if hOk && nOk {
			no, err := strconv.Atoi(noStr)
			if err != nil {
				continue
			}

			// Convert the 1-based `no` tag to a 0-based array index
			index := no - 1
			nmap[index] = i
			hmap[index] = header
			cols++
		}
	}

	var headers strings.Builder
	var records strings.Builder
	for r, d := range data {
		el := reflect.ValueOf(&d).Elem()
		for c := 0; c < cols; c++ {
			idx := nmap[c]

			// Header (written once, unquoted)
			header := hmap[c]
			if r == 0 {
				headers.WriteString(fmt.Sprintf("%v", header))
				if c < cols-1 {
					headers.WriteString(",")
				}
			}

			// Records
			field := el.Field(idx)
			if IsFloat(field.Type()) {
				records.WriteString(fmt.Sprintf(valueFormat, F64ToString(field.Float())))
			} else {
				value := ""
				if IsPointer(field.Type()) {
					if field.Elem().IsValid() {
						value = fmt.Sprintf(valueFormatCore, field.Elem())
					}
				} else {
					value = fmt.Sprintf(valueFormatCore, field)
				}
				records.WriteString(fmt.Sprintf(valueFormat, value))
			}
			if c < cols-1 {
				records.WriteString(",")
			} else {
				records.WriteString("\n")
			}
		}
	}
	return fmt.Sprintf("%s%s\n%s", Utf8BOM, headers.String(), records.String())
}

func headerLookup[T any](d T, c int) (string, bool) {
	return reflect.TypeOf(d).Field(c).Tag.Lookup("header")
}

func noLookup[T any](d T, c int) (string, bool) {
	return reflect.TypeOf(d).Field(c).Tag.Lookup("no")
}
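// Usage sketch for TryConvert (mirrors the Convert example; the struct and values
// are illustrative). Note that headers are written unquoted while record values
// are quoted by default:
//
//	type MyStruct struct {
//		Name string `header:"Name" no:"2"`
//		ID   int    `header:"ID" no:"1"`
//	}
//
//	m := []MyStruct{{ID: 1, Name: "N1"}}
//	out := csvx.TryConvert(m)
//	// out starts with the UTF-8 BOM, followed by:
//	// ID,Name
//	// "1","N1"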
package csvx import ( "bufio" "encoding/csv" "fmt" "io" "mime/multipart" "reflect" "strconv" ) type model[T any] struct { Data T } // FileHeaderReader extracts the header of a multipart file specified by the given *multipart.FileHeader parameter // and returns a slice of slices of strings representing the parsed header. Each slice in the result represents // a single header field, where the first element is the header field name and the second element is the header field value. // If the header is empty or cannot be parsed, an empty slice will be returned. If an error occurs during the operation, // an error value will be returned. // Ex: // file, _ := c.FormFile("file") // rows, err := csvx.FileHeaderReader(file) func FileHeaderReader(fileHeader *multipart.FileHeader) ([][]string, error) { file, err := fileHeader.Open() if err != nil { return [][]string{}, err } // Parse the file r := csv.NewReader(bufio.NewReader(file)) // Read the records _, err = r.Read() if err != nil { return [][]string{}, err } // Iterate through the records rows := [][]string{} for { // Read each record from csv record, e := r.Read() if e == io.EOF { break } rows = append(rows, record) } return rows, nil } // ParserString is a generic function that takes a slice of slices of strings as input and returns a slice of values of type T, // where T is a type parameter that represents the desired output type. The input slice should represent a CSV file // or other tabular data in which each inner slice represents a single row of data, and each element in the inner slice represents // a single field value. This function will attempt to parse each field value into the corresponding type T using the built-in strconv package. // If parsing fails or the input slice is empty, an empty slice of type T will be returned. // // type Struct struct { // ID string `header:"ID"` // Name string `header:"Name Space"` // } // // rows := [][]string{ // {"ID", "Name Space"}, // {"1", "Name1"}, // } // // s := csvx.ParserString[Struct](rows) func ParserString[T any](rows [][]string) []T { var structs []T if len(rows) == 0 { return structs } header := rows[0] for i, row := range rows { if i == 0 { continue } record := model[T]{} structValue := reflect.ValueOf(&record.Data).Elem() for j, field := range row { structField := structValue.FieldByNameFunc(func(fieldName string) bool { f, _ := reflect.TypeOf(record.Data).FieldByName(fieldName) fieldTag := f.Tag.Get("header") head := RemoveDoubleQuote(header[j]) return fieldTag == fmt.Sprintf("%v", head) }) if structField.IsValid() { structField.SetString(field) } } structs = append(structs, record.Data) } return structs } // Parser parses the provided input data and returns the result. // It handles different formats based on the input type. 
// Parser maps tabular string data onto a slice of structs of type T. The first inner slice
// is treated as the header row; fields are matched via the `header` struct tag and converted
// to the target field's kind (string, integer, unsigned integer, float, bool, or a pointer to
// one of those). When a value fails to parse into a pointer field, the field is left nil.
func Parser[T any](rows [][]string) []T {
	var structs []T
	if len(rows) == 0 {
		return structs
	}

	header := rows[0]
	for i, row := range rows {
		if i == 0 {
			continue
		}

		record := model[T]{}
		structValue := reflect.ValueOf(&record.Data).Elem()
		for j, field := range row {
			// Guard against records that are longer than the header row
			if j >= len(header) {
				break
			}

			structField := structValue.FieldByNameFunc(func(fieldName string) bool {
				f, _ := reflect.TypeOf(record.Data).FieldByName(fieldName)
				fieldTag := f.Tag.Get("header")
				head := RemoveDoubleQuote(header[j])
				return fieldTag == fmt.Sprintf("%v", head)
			})

			if structField.IsValid() {
				// Convert the value based on the field kind
				switch structField.Kind() {
				case reflect.Ptr:
					// Handle pointer types: parse into a freshly allocated value
					// and leave the field nil when parsing fails
					fieldType := structField.Type()
					elemType := fieldType.Elem()
					ptrValue := reflect.New(elemType)
					switch elemType.Kind() {
					case reflect.String:
						ptrValue.Elem().SetString(field)
					case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
						value, err := strconv.ParseInt(field, 10, 64)
						if err == nil {
							ptrValue.Elem().SetInt(value)
						} else {
							structField.Set(reflect.Zero(fieldType))
							continue
						}
					case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
						value, err := strconv.ParseUint(field, 10, 64)
						if err == nil {
							ptrValue.Elem().SetUint(value)
						} else {
							structField.Set(reflect.Zero(fieldType))
							continue
						}
					case reflect.Float32, reflect.Float64:
						value, err := strconv.ParseFloat(field, 64)
						if err == nil {
							ptrValue.Elem().SetFloat(value)
						} else {
							structField.Set(reflect.Zero(fieldType))
							continue
						}
					case reflect.Bool:
						value, err := strconv.ParseBool(field)
						if err == nil {
							ptrValue.Elem().SetBool(value)
						} else {
							structField.Set(reflect.Zero(fieldType))
							continue
						}
					case reflect.Struct:
						// Only assign when the raw string is assignable to the struct type;
						// this avoids a panic for arbitrary struct fields
						if reflect.TypeOf(field).AssignableTo(elemType) {
							ptrValue.Elem().Set(reflect.ValueOf(field))
						}
					}
					structField.Set(ptrValue)
				default:
					// Handle non-pointer types: parse in place, leaving the zero
					// value when parsing fails
					switch structField.Kind() {
					case reflect.String:
						structField.SetString(field)
					case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
						value, err := strconv.ParseInt(field, 10, 64)
						if err == nil {
							structField.SetInt(value)
						}
					case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
						value, err := strconv.ParseUint(field, 10, 64)
						if err == nil {
							structField.SetUint(value)
						}
					case reflect.Float32, reflect.Float64:
						value, err := strconv.ParseFloat(field, 64)
						if err == nil {
							structField.SetFloat(value)
						}
					case reflect.Bool:
						value, err := strconv.ParseBool(field)
						if err == nil {
							structField.SetBool(value)
						}
					case reflect.Struct:
						// Only assign when the raw string is assignable to the field type;
						// this avoids a panic for arbitrary struct fields
						if reflect.TypeOf(field).AssignableTo(structField.Type()) {
							structField.Set(reflect.ValueOf(field))
						}
					}
				}
			}
		}
		structs = append(structs, record.Data)
	}
	return structs
}

// ParserFunc processes each row with a custom callback, optionally skipping the header row.
// It stops and returns the first error the callback returns. This allows for flexible and
// reusable parsing logic.
//
//	err := csvx.ParserFunc(true, rows, func(record []string) error {
//		return nil
//	})
func ParserFunc(excludeHeader bool, rows [][]string, onRecord func([]string) error) error {
	for i, row := range rows {
		if excludeHeader && i == 0 {
			continue
		}
		if err := onRecord(row); err != nil {
			return err
		}
	}
	return nil
}

// ParserByReader reads all records from the given *csv.Reader (using the optional
// delimiter, default ',') and maps them onto a slice of structs of type T via Parser.
func ParserByReader[T any](ir *csv.Reader, delimiter ...rune) []T {
	d := ','
	if len(delimiter) > 0 {
		d = delimiter[0]
	}
	return Parser[T](Reader(ir, func(r *csv.Reader) {
		r.Comma = d
	}))
}
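// Usage sketch for Parser with typed and pointer fields (the struct and values
// are illustrative):
//
//	type Row struct {
//		ID    int      `header:"ID"`
//		Score *float64 `header:"Score"`
//	}
//
//	rows := [][]string{{"ID", "Score"}, {"1", "9.5"}, {"2", "not-a-number"}}
//	records := csvx.Parser[Row](rows)
//	// records[0].ID == 1 and *records[0].Score == 9.5
//	// records[1].Score == nil because "not-a-number" fails to parse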
package csvx import ( "bytes" "encoding/csv" "io" "os" ) // ReadByte reads the entire contents of the file with the specified filename and returns them as a slice of bytes. // If an error occurs during the operation, a nil slice will be returned. This function can be used to read the contents // of text files or other files that are encoded as byte streams. Note that this function may not be suitable for reading // large files, as it reads the entire file into memory at once. For large files, consider using the os package or // a buffered reader to read the file in smaller chunks. func ReadByte(filename string) []byte { data, err := os.ReadFile(filename) if err != nil { return []byte{} } return data } // ByteReader creates an io.Reader from a byte slice. // It allows the byte data to be read sequentially as a stream. func ByteReader(data []byte, options ...func(r *csv.Reader)) [][]string { // Create a bytes.Reader from the byte slice byteReader := bytes.NewReader(data) // Parse the file r := csv.NewReader(byteReader) return Reader(r, options...) } // Reader wraps an existing io.Reader to provide additional functionality. // It may include features like buffering or line-by-line reading. func Reader(r *csv.Reader, options ...func(r *csv.Reader)) [][]string { r.LazyQuotes = true r.Comma = ',' r.Comment = '#' // Set the comment character (lines beginning with this are ignored) for _, option := range options { option(r) } // Iterate through the records rows := [][]string{} for { // Read each record from csv record, e := r.Read() if e == io.EOF { break } rows = append(rows, record) } return rows }