package Task import ( "ColdVerify_local/conf" "ColdVerify_local/lib" "ColdVerify_local/logs" "encoding/json" "fmt" "github.com/astaxie/beego/cache" _ "github.com/astaxie/beego/cache/redis" "github.com/beego/beego/v2/adapter/orm" orm2 "github.com/beego/beego/v2/client/orm" "github.com/go-sql-driver/mysql" _ "github.com/go-sql-driver/mysql" "strconv" "strings" "time" ) const ( Temperature = "Temperature" Humidity = "Humidity" ) // 模板 type TaskData struct { Id int `orm:"column(ID);size(11);auto;pk"` T_id string `orm:"size(256);null"` // 标题 T_t float32 `orm:"size(10);null"` // 温度 T_rh float32 `orm:"size(10);null"` // 湿度 T_time time.Time `orm:"type(timestamp);null;"` // 采集时间 CreateTime time.Time `orm:"column(create_time);type(timestamp);null;auto_now_add"` //auto_now_add 第一次保存时才设置时间 UpdateTime time.Time `orm:"column(update_time);type(timestamp);null;auto_now"` //auto_now 每次 model 保存时都会对时间自动更新 } func (t *TaskData) TableName() string { return "task_data" // 数据库名称 // ************** 替换 FormulaList ************** } var redisCache_TaskData cache.Cache func init() { //orm2.Debug = true config := fmt.Sprintf(`{"key":"%s","conn":"%s","dbNum":"%s","password":"%s"}`, "redis_"+"TaskData", conf.Redis_address, conf.Redis_dbNum, conf.Redis_password) logs.Println(config) var err error redisCache_TaskData, err = cache.NewCache("redis", config) if err != nil || redisCache_TaskData == nil { errMsg := "failed to init redis" logs.Println(errMsg, err) } } type TaskDataJPG struct { State int `json:"state"` //1:生成中 2:已完成 3:失败 Msg string `json:"msg"` Url string `json:"url"` //url } // ---------------- Redis ------------------- // Redis_Set(m.T_sn,m) // Redis 更新缓存 func Redis_TaskDataJPG_Set(key string, r TaskDataJPG) (err error) { //json序列化 str, err := json.Marshal(r) if err != nil { logs.Error(lib.FuncName(), err) return } err = redisCache_TaskData.Put(key, str, 5*time.Minute) if err != nil { logs.Println("set key:", key, ",value:", str, err) } return } // if r,is :=Redis_Get(T_sn);is{ // return r,nil // } func Redis_TaskDataJPG_Get(key string) (r TaskDataJPG, is bool) { if redisCache_TaskData.IsExist(key) { logs.Println("找到key:", key) v := redisCache_TaskData.Get(key) json.Unmarshal(v.([]byte), &r) return r, true } logs.Println("没有 找到key:", key) return TaskDataJPG{}, false } func Redis_TaskDataJPG_Del(key string) (err error) { err = redisCache_TaskData.Delete(key) return } // 创建数据库 Device.CREATE_TaskData("") func CREATE_TaskData(alias_name, T_task_id string) bool { o := orm2.NewOrmUsingDB(alias_name) sql := "DROP TABLE IF EXISTS `z_task_data_" + T_task_id + "`" o.Raw(sql).Exec() sql = "CREATE TABLE IF NOT EXISTS `z_task_data_" + T_task_id + "` ( " + " `ID` int(11) NOT NULL AUTO_INCREMENT," + " `t_sn` varchar(256) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL," + " `t_id` varchar(256) DEFAULT NULL," + " `t_t` float(6, 1) NULL DEFAULT NULL," + " `t_rh` float(6, 1) NULL DEFAULT NULL," + " `t_time` datetime(0) NULL DEFAULT NULL," + " PRIMARY KEY (`ID`) USING BTREE," + " KEY `t_sn` (`t_sn`)," + " KEY `t_id` (`t_id`)," + " KEY `t_time` (`t_time`)" + ") ENGINE = InnoDB AUTO_INCREMENT = 5 CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;" _, err := o.Raw(sql).Exec() if err != nil { return false } //sqlIndex := "ALTER TABLE `z_task_data_" + T_task_id + "` add unique index(`t_sn`,`t_id`,`t_time`);" //logs.Debug(sqlIndex) //o.Raw(sqlIndex).Exec() return true } // ---------------- 特殊方法 ------------------- // 清空 func Truncate_TaskData(alias_name, T_task_id string) bool { o := orm2.NewOrmUsingDB(alias_name) sql := "truncate table z_task_data_" + T_task_id logs.Println(sql) _, err := o.Raw(sql).Exec() if err != nil { // 数据库表不存在,则创建数据库 if err.(*mysql.MySQLError).Number == 1146 { CREATE_TaskData(alias_name, T_task_id) return true } logs.Error(lib.FuncName(), err) return false } return true } type TaskData_ struct { ID int `orm:"column(ID);size(100);null"` // ID T_sn string `orm:"column(t_sn);size(256);null"` // sn T_id string `orm:"column(t_id);size(256);null"` // 标题 T_t float32 `orm:"column(t_t);size(10);null"` // 温度 T_rh float32 `orm:"column(t_rh);size(10);null"` // 湿度 T_time string `orm:"column(t_times);null;"` // 采集时间 T_Certificate_sn string `orm:"size(256);null"` // 证书编号 } type TaskDataClass_ struct { T_sn string `orm:"column(t_sn);size(256);null"` // 标题 T_id string `orm:"column(t_id);size(256);null"` // 名称 } func Read_TaskData_ById_List(T_task_id string, SN string, T_id string, Time_start_ string, Time_end_ string, page int, page_z int) ([]TaskData_, int64) { o := orm.NewOrm() var maps []TaskData_ var maps_z []orm2.ParamsList pagez := page_z var offset int if page <= 1 { offset = 0 } else { page -= 1 offset = page * pagez } sql_condition := "" if len(Time_start_) > 1 { sql_condition += " AND t_time >= '" + Time_start_ + "'" } if len(Time_end_) > 1 { sql_condition += " AND t_time <= '" + Time_end_ + "'" } if len(T_id) > 0 { sql_condition += " AND t_id = '" + T_id + "'" } if len(SN) > 0 { sql_condition += " AND t_sn = '" + SN + "'" } if len(sql_condition) > 0 { sql_condition = " WHERE " + strings.TrimLeft(sql_condition, " AND ") } sql := "SELECT COUNT(ID) FROM z_task_data_" + T_task_id + sql_condition fmt.Println(sql) _, err := o.Raw(sql).ValuesList(&maps_z) if err != nil { return maps, 0 } if len(maps_z) == 0 { return maps, 0 } //fmt.Println("maps_z;",maps_z[0][0]) sql = "SELECT ID,t_sn,t_id,t_t,t_rh,DATE_FORMAT(t_time,'%Y-%m-%d %H:%i:%s') AS t_times,t_time FROM z_task_data_" + T_task_id + sql_condition + " ORDER BY t_time DESC" if page_z != 9999 { sql = sql + " LIMIT " + strconv.Itoa(offset) + "," + strconv.Itoa(pagez) } fmt.Println(sql) _, err = o.Raw(sql).QueryRows(&maps) if err != nil { logs.Error(lib.FuncName(), err) } //value, _ := strconv.ParseFloat(fmt.Sprintf("%.2f", cnt), 64) key, _ := strconv.Atoi(maps_z[0][0].(string)) return maps, int64(key) } func Read_TaskData_ById_List_AES(T_task_id string, SN string, T_id string, Time_start_ string, Time_end_ string, page int, page_z int) ([]TaskData_, int64) { o := orm.NewOrm() var maps []TaskData_ var maps_z []orm2.ParamsList pagez := page_z var offset int if page <= 1 { offset = 0 } else { page -= 1 offset = page * pagez } sql_condition := "" if len(Time_start_) > 1 { sql_condition += " AND t_time >= '" + Time_start_ + "'" } if len(Time_end_) > 1 { sql_condition += " AND t_time <= '" + Time_end_ + "'" } if len(T_id) > 0 { sql_condition += " AND t_id = '" + T_id + "'" } if len(SN) > 0 { sql_condition += " AND t_sn = '" + SN + "'" } if len(sql_condition) > 0 { sql_condition = " WHERE " + strings.TrimLeft(sql_condition, " AND ") } sql := "SELECT COUNT(ID) FROM z_task_data_" + T_task_id + sql_condition fmt.Println(sql) _, err := o.Raw(sql).ValuesList(&maps_z) if err != nil { return maps, 0 } if len(maps_z) == 0 { return maps, 0 } //fmt.Println("maps_z;",maps_z[0][0]) sql = "SELECT ID,t_sn,t_id,t_t,t_rh,DATE_FORMAT(t_time,'%Y-%m-%d %H:%i:%s') AS t_times,t_time FROM z_task_data_" + T_task_id + sql_condition + " ORDER BY t_time" if page_z != 9999 { sql = sql + " LIMIT " + strconv.Itoa(offset) + "," + strconv.Itoa(pagez) } fmt.Println(sql) _, err = o.Raw(sql).QueryRows(&maps) if err != nil { logs.Error(lib.FuncName(), err) } //value, _ := strconv.ParseFloat(fmt.Sprintf("%.2f", cnt), 64) key, _ := strconv.Atoi(maps_z[0][0].(string)) return maps, int64(key) } func Read_TaskData_ById_List_(T_task_id string, SN string) []TaskData_ { o := orm.NewOrm() var maps []TaskData_ sql_condition := "" sql_condition += " t_sn ='" + SN + "'" //fmt.Println("maps_z;",maps_z[0][0]) sql := "SELECT ID,t_sn,t_id,t_t,t_rh,DATE_FORMAT(t_time,'%Y-%m-%d %H:%i') AS t_times,t_time FROM z_task_data_" + T_task_id + " WHERE " + sql_condition + " ORDER BY t_time " fmt.Println(sql) _, err := o.Raw(sql).QueryRows(&maps) if err != nil { logs.Error(lib.FuncName(), err) return maps } return maps } func Read_TaskData_ById_ClassList(T_task_id string) []TaskDataClass_ { o := orm.NewOrm() var maps []TaskDataClass_ //fmt.Println("maps_z;",maps_z[0][0]) //sql := "SELECT DISTINCT t_sn,t_id FROM z_task_data_" + T_task_id + " ORDER BY t_id " //sql := "SELECT DISTINCT t_sn FROM z_task_data_" + T_task_id + " ORDER BY t_id " sql := "SELECT t_sn,t_id FROM z_task_data_" + T_task_id + " GROUP BY t_sn,t_id ORDER BY t_id+0 " fmt.Println(sql) _, err := o.Raw(sql).QueryRows(&maps) if err != nil { logs.Error(lib.FuncName(), err) return maps } return maps } func Read_TaskData_sn(T_task_id string) []TaskDataClass_ { o := orm.NewOrm() var maps []TaskDataClass_ sql := "SELECT DISTINCT t_sn FROM z_task_data_" + T_task_id + " ORDER BY t_sn " fmt.Println(sql) _, err := o.Raw(sql).QueryRows(&maps) if err != nil { logs.Error(lib.FuncName(), err) return maps } return maps } func Update_TaskData_ByT_sn(T_task_id, T_sn, T_id string) error { o := orm.NewOrm() // 修改id sql := "UPDATE z_task_data_" + T_task_id + " SET `t_id` = '" + T_id + "' WHERE `t_sn` = '" + T_sn + "'" logs.Println(sql) _, err := o.Raw(sql).Exec() if err != nil { logs.Error(lib.FuncName(), err) return err } return nil } func Delete_TaskData_ByT_sn(T_task_id, T_sn string) error { o := orm.NewOrm() // 修改id sql := "DELETE FROM z_task_data_" + T_task_id + " WHERE `t_sn` = '" + T_sn + "'" logs.Println(sql) _, err := o.Raw(sql).Exec() if err != nil { logs.Error(lib.FuncName(), err) return err } return nil } // 添加 func Add_TaskData(T_task_id string, T_sn string, T_id string, T_t string, T_rh string, T_time string) bool { o := orm2.NewOrm() tx, err := o.Begin() if err != nil { logs.Error("start the transaction failed") return false } // 开始插入数据 // //sql := "INSERT INTO z_task_data_" + T_task_id + " (`t_sn`, `t_id`, `t_time`) " + // "VALUES ('" + T_sn + "', " + T_id + ", '" + T_time + "') " + // "ON DUPLICATE KEY UPDATE t_t=" + T_t + ", t_rh="+ T_rh +";" // 去重复数据 sql := "DELETE FROM z_task_data_" + T_task_id + " WHERE " + " t_id = '" + T_id + "' AND " + " t_sn = '" + T_sn + "' " + "AND t_time = '" + T_time + "' " // 这里有时间优化 用于一次 prepare 多次 exec,以提高批量执行的速度 //fmt.Println(sql) res, err := tx.Raw(sql).Exec() if err != nil { tx.Rollback() logs.Error(lib.FuncName(), err) return false } res.RowsAffected() sql = "INSERT INTO z_task_data_" + T_task_id + " (`t_sn`, `t_id`, `t_t`, `t_rh`, `t_time`) " + "VALUES ('" + T_sn + "', '" + T_id + "', " + T_t + "," + T_rh + ", '" + T_time + "')" // 更新数据 //sql := "INSERT INTO z_task_data_" + T_task_id + " (`t_sn`, `t_id`, `t_t`, `t_rh`, `t_time`) " + // "VALUES ('" + T_sn + "', " + T_id + ", " + T_t + "," + T_rh + ", '" + T_time + "')" + // "on duplicate key update `t_t`=" + T_t + ",`t_rh`=" + T_rh //fmt.Println(sql) res, err = tx.Raw(sql).Exec() if err != nil { tx.Rollback() logs.Error(lib.FuncName(), err) return false } tx.Commit() //fmt.Println("mysql row affected nums: ", num) return true } // 创建唯一索引 func Create_Unique_Index(T_task_id string) error { // 创建索引 o := orm.NewOrm() createIndexSql := "ALTER TABLE z_task_data_" + T_task_id + " ADD CONSTRAINT unique_index_t_sn_t_id_t_time UNIQUE (t_sn, t_id, t_time);" //fmt.Println(sql) _, err := o.Raw(createIndexSql).Exec() if err != nil && !strings.Contains(err.Error(), "Duplicate key name 'unique_index_t_sn_t_id_t_time'") { logs.Error(lib.FuncName(), err) return err } return nil } // 删除唯一索引 func Delete_Unique_Index(T_task_id string) error { // 创建索引 o := orm.NewOrm() // 删除索引 deleteIndexSql := "ALTER TABLE z_task_data_" + T_task_id + " DROP INDEX unique_index_t_sn_t_id_t_time;" //fmt.Println(sql) _, err := o.Raw(deleteIndexSql).Exec() if err != nil { logs.Error(lib.FuncName(), err) return err } return nil } func Adds_TaskData(T_task_id string, valueStrings []string) error { o := orm.NewOrm() // 插入数据 sql := "INSERT INTO z_task_data_" + T_task_id + " (`t_sn`, `t_id`, `t_t`, `t_rh`, `t_time`) VALUES" sql += strings.Join(valueStrings, ",") sql += " ON DUPLICATE KEY UPDATE t_t = VALUES(t_t), t_rh = VALUES(t_rh)" //fmt.Println(sql) _, err := o.Raw(sql).Exec() if err != nil { logs.Error(lib.FuncName(), err) return err } //fmt.Println("mysql row affected nums: ", num) return nil } // 修改 func Up_TaskData(T_task_id string, Id string, T_t string, T_rh string, T_time string) bool { o := orm.NewOrm() SET_str := " SET " if len(T_t) > 0 { SET_str = SET_str + " t_t = " + T_t + " ," } if len(T_rh) > 0 { SET_str = SET_str + " t_rh = " + T_rh + " ," } if len(T_time) > 0 { SET_str = SET_str + " t_time = '" + T_time + "' ," } if len(SET_str) > 7 { SET_str = SET_str[:(len(SET_str) - 1)] } // 开始插入数据 UPDATE `cold_verify`.`Z_TaskData_d8qMyeXLzIxn` SET `t_t` = 20.2 WHERE `ID` = 69 sql := "UPDATE z_task_data_" + T_task_id + " " + SET_str + " WHERE ID = " + Id // 这里有时间优化 用于一次 prepare 多次 exec,以提高批量执行的速度 //fmt.Println(sql) res, err := o.Raw(sql).Exec() if err != nil { logs.Error(lib.FuncName(), err) return false } res.RowsAffected() //fmt.Println("mysql row affected nums: ", num) return true } // 删除 func Del_TaskData(T_task_id string, Id string) bool { o := orm.NewOrm() // 开始插入数据 UPDATE `cold_verify`.`Z_TaskData_d8qMyeXLzIxn` SET `t_t` = 20.2 WHERE `ID` = 69 sql := "DELETE FROM z_task_data_" + T_task_id + " WHERE ID = " + Id // 这里有时间优化 用于一次 prepare 多次 exec,以提高批量执行的速度 logs.Println(sql) res, err := o.Raw(sql).Exec() if err != nil { logs.Error(lib.FuncName(), err) return false } res.RowsAffected() //fmt.Println("mysql row affected nums: ", num) return true } func Read_TaskData_ByT_id(T_task_id string, Id string) (t_sn string, err error) { o := orm.NewOrm() // 开始插入数据 UPDATE `cold_verify`.`Z_TaskData_d8qMyeXLzIxn` SET `t_t` = 20.2 WHERE `ID` = 69 sql := "SELECT t_sn FROM z_task_data_" + T_task_id + " WHERE t_id = '" + Id + "'" // 这里有时间优化 用于一次 prepare 多次 exec,以提高批量执行的速度 logs.Println(sql) err = o.Raw(sql).QueryRow(&t_sn) if err != nil { logs.Error(lib.FuncName(), err) return t_sn, err } //fmt.Println("mysql row affected nums: ", num) return t_sn, err } func Read_TaskData_ByT_sn(T_task_id string, sn string) (t_id string, err error) { o := orm.NewOrm() // 开始插入数据 UPDATE `cold_verify`.`Z_TaskData_d8qMyeXLzIxn` SET `t_t` = 20.2 WHERE `ID` = 69 sql := "SELECT t_id FROM z_task_data_" + T_task_id + " WHERE t_sn = '" + sn + "'" // 这里有时间优化 用于一次 prepare 多次 exec,以提高批量执行的速度 logs.Println(sql) err = o.Raw(sql).QueryRow(&t_id) if err != nil { logs.Error(lib.FuncName(), err) return t_id, err } //fmt.Println("mysql row affected nums: ", num) return t_id, err } // 添加 func Del_TaskData_t_id(T_task_id string, Id string) bool { o := orm.NewOrm() // 开始插入数据 UPDATE `cold_verify`.`Z_TaskData_d8qMyeXLzIxn` SET `t_t` = 20.2 WHERE `ID` = 69 sql := "DELETE FROM z_task_data_" + T_task_id + " WHERE t_id = '" + Id + "'" // 这里有时间优化 用于一次 prepare 多次 exec,以提高批量执行的速度 logs.Println(sql) res, err := o.Raw(sql).Exec() if err != nil { logs.Error(lib.FuncName(), err) return false } res.RowsAffected() //fmt.Println("mysql row affected nums: ", num) return true } // 检查导出表总数 func Check_TaskData_Num(T_task_id string) bool { o1 := orm2.NewOrmUsingDB(conf.Local_AliasName) o2 := orm2.NewOrmUsingDB(conf.Server_AliasName) var maps_z1, maps_z2 []orm2.ParamsList sql := "SELECT COUNT(ID) FROM z_task_data_" + T_task_id fmt.Println(sql) _, err := o1.Raw(sql).ValuesList(&maps_z1) if err != nil { return false } _, err = o2.Raw(sql).ValuesList(&maps_z2) if err != nil { return false } if len(maps_z1) != len(maps_z2) { return false } return true } // 检查导出表总数 func Check_TaskData_Num_Verify1(T_task_id string) bool { o1 := orm2.NewOrmUsingDB(conf.Local_AliasName) o2 := orm2.NewOrmUsingDB(conf.Verify1_AliasName) var maps_z1, maps_z2 []orm2.ParamsList sql := "SELECT COUNT(ID) FROM z_task_data_" + strings.ToLower(T_task_id) fmt.Println(sql) _, err := o1.Raw(sql).ValuesList(&maps_z1) if err != nil { return false } sql2 := "SELECT COUNT(ID) FROM Z_TaskData_" + T_task_id _, err = o2.Raw(sql2).ValuesList(&maps_z2) if err != nil { return false } if len(maps_z1) != len(maps_z2) { return false } return true } func Read_TaskData_Num(T_task_id string) int { o1 := orm2.NewOrmUsingDB(conf.Local_AliasName) var maps_z []orm2.ParamsList sql := "SELECT COUNT(ID) FROM z_task_data_" + T_task_id fmt.Println(sql) _, err := o1.Raw(sql).ValuesList(&maps_z) if err != nil { logs.Error(lib.FuncName(), err) return 0 } key, _ := strconv.Atoi(maps_z[0][0].(string)) return key } func Dump_TaskData(T_task_id, root, password, url_port, database, sql_file string) (string, error) { // url_port 127.0.0.1:3306 // mysql8.0 以上加 --column-statistics=0 // mysqldump --column-statistics=0 -uroot -proot -h127.0.0.1 -P3306 cold_verify Z_TaskData_ixEfo5zk2Oeb > /Data/Z_TaskData_ixEfo5zk2Oeb.sql v := Read_Local_Mysql_Version() host_port := strings.Split(url_port, ":") table_name := "z_task_data_" + T_task_id org := "mysqldump " if v >= 8 { org += "--column-statistics=0 " } //--no-create-info 只导出数据,而不添加 CREATE TABLE 语句。 //--single-transaction 在备份库的时候并不锁定数据表 //--add-locks:在每个表导出之前增加LOCK TABLES并且之后UNLOCK TABLE。(默认为打开状态,使用--skip-add-locks取消选项) //--compact:压缩模式,产生更少的输出; org = org + fmt.Sprintf("-u%s -p%s -h%s -P%s --no-create-info --set-gtid-purged=OFF --skip-add-locks --compact --quick %s %s > %s ", root, password, host_port[0], host_port[1], database, table_name, sql_file) logs.Println(org) _, err := lib.Command(org) if err != nil { logs.Error(lib.FuncName(), err) } return org, err } func Dump_TaskData_Verify(T_task_id, root, password, url_port, database, sql_file string) (string, error) { // url_port 127.0.0.1:3306 // mysql8.0 以上加 --column-statistics=0 // mysqldump --column-statistics=0 -uroot -proot -h127.0.0.1 -P3306 cold_verify Z_TaskData_ixEfo5zk2Oeb > /Data/Z_TaskData_ixEfo5zk2Oeb.sql v := Read_Local_Mysql_Version() host_port := strings.Split(url_port, ":") table_name := "Z_TaskData_" + T_task_id org := "mysqldump " if v >= 8 { org += "--column-statistics=0 " } //--no-create-info 只导出数据,而不添加 CREATE TABLE 语句。 //--single-transaction 在备份库的时候并不锁定数据表 //--add-locks:在每个表导出之前增加LOCK TABLES并且之后UNLOCK TABLE。(默认为打开状态,使用--skip-add-locks取消选项) //--compact:压缩模式,产生更少的输出; org = org + fmt.Sprintf("-u%s -p%s -h%s -P%s --no-create-info --set-gtid-purged=OFF --skip-add-locks --compact --quick %s %s > %s ", root, password, host_port[0], host_port[1], database, table_name, sql_file) logs.Println(org) _, err := lib.Command(org) if err != nil { logs.Error(lib.FuncName(), err) } return org, err } func Insert_TaskData(root, password, url_port, database, sql_file string) (string, error) { // url_port 127.0.0.1:3306 // mysql -u root -p root -h127.0.0.1 -P3306 cold_verify_local < /data/Z_TaskData_ixEfo5zk2Oeb.sql host_port := strings.Split(url_port, ":") org := fmt.Sprintf("mysql -u%s -p%s -h%s -P%s %s < %s", root, password, host_port[0], host_port[1], database, sql_file) logs.Println(org) _, err := lib.Command(org) if err != nil { logs.Error(lib.FuncName(), err) } return org, err } func Read_Local_Mysql_Version() int { o := orm2.NewOrmUsingDB(conf.Local_AliasName) var params []orm2.Params o.Raw("select version();").Values(¶ms) version, _ := strconv.Atoi(params[0]["version()"].(string)[0:1]) return version } func Import_TaskData(T_task_id string, offset, pagez int) bool { var maps []TaskData_ serverOrm := orm2.NewOrmUsingDB(conf.Server_AliasName) localOrm := orm2.NewOrmUsingDB(conf.Local_AliasName) tb_name := "z_task_data_" + T_task_id // 开从先上版查询数据 sql := "select `ID`, `t_sn`, `t_id`, `t_t`, `t_rh`, DATE_FORMAT(t_time,'%Y-%m-%d %H:%i:%s') AS t_times from " + tb_name + " LIMIT " + strconv.Itoa(offset) + "," + strconv.Itoa(pagez) logs.Println(sql) _, err := serverOrm.Raw(sql).QueryRows(&maps) if err != nil { logs.Error(lib.FuncName(), err) return false } values := []string{} for _, row := range maps { // 处理数据 values = append(values, fmt.Sprintf("('%s',%d,%v,%v,'%s')", row.T_sn, row.T_id, row.T_t, row.T_rh, row.T_time)) } // 向本地版插入数据 sql = "INSERT IGNORE INTO " + tb_name + "( `t_sn`, `t_id`, `t_t`, `t_rh`, `t_time`) VALUES " + strings.Join(values, ",") //logs.Println(sql) _, err = localOrm.Raw(sql).Exec() if err != nil { logs.Error(lib.FuncName(), err) return false } return true } // InsertTaskData 添加设备数据 func InsertTaskData(T_task_id string, d TaskData_) { o := orm.NewOrm() var maps TaskData_ sql := fmt.Sprintf("SELECT * FROM z_task_data_%s where t_time='%s' and t_id = '%s' and t_sn = '%s'", T_task_id, d.T_time, d.T_id, d.T_sn) err := o.Raw(sql).QueryRow(&maps) if err != nil && err.Error() != orm.ErrNoRows.Error() { logs.Error(lib.FuncName(), err) } // 添加 if maps.ID == 0 { sqlStatement := fmt.Sprintf("insert into z_task_data_%s(t_sn, t_id, t_t, t_rh, t_time) value('%s','%s',%f,%f,'%s')", T_task_id, d.T_sn, d.T_id, d.T_t, d.T_rh, d.T_time) _, err = o.Raw(sqlStatement).Exec() if err != nil { logs.Error(lib.FuncName(), err) } return } //已经存在该条目,更新 sqlStatement := fmt.Sprintf("update z_task_data_%s set t_t = %f,t_rh = %f where t_time= '%s' and t_id = '%s' and t_sn = '%s'", T_task_id, d.T_t, d.T_rh, d.T_time, d.T_id, d.T_sn) _, err = o.Raw(sqlStatement).Exec() if err != nil { logs.Error(lib.FuncName(), err) } } // DeleteTaskDataByTimeRange 删除时间范围内的数据 func DeleteTaskDataByTimeRange(T_task_id, sn string, id string, start, end string) { sqlStatement := fmt.Sprintf("delete from z_task_data_%s where t_time between '%s' and '%s' and t_id = '%s' and t_sn = '%s'", T_task_id, start, end, id, sn) o := orm.NewOrm() exec, err := o.Raw(sqlStatement).Exec() if err != nil { fmt.Println(err.Error()) } affected, _ := exec.RowsAffected() fmt.Printf("从%s~%s时间段删除了%d条数据", start, end, affected) } // UpdateTaskDataTemperatureAndHumidityRandom 随机更新 func UpdateTaskDataTemperatureAndHumidityRandom(T_task_id, sn, id, startTime, endTime string, ttMax, ttMin, trhMax, trhMin int) { //sql语句 sqlStatement := fmt.Sprintf("update z_task_data_%s set t_t = t_t + FLOOR(%d + RAND() * (%d - %d + 1)) /100.0, t_rh = t_rh + FLOOR(%d + RAND() * (%d - %d + 1)) /100.0 where t_time BETWEEN '%s' AND '%s' and t_id = '%s' and t_sn = '%s'", T_task_id, ttMin, ttMax, ttMin, trhMin, trhMax, trhMin, startTime, endTime, id, sn) o := orm.NewOrm() exec, err := o.Raw(sqlStatement).Exec() if err != nil { fmt.Println("执行错误:", sqlStatement) } affected, _ := exec.RowsAffected() fmt.Println("执行更新行数:", affected) } // UpdateTaskDataTemperatureAndHumidity 更新设备探头数据温湿度 func UpdateTaskDataTemperatureAndHumidity(T_task_id, sn, id, startTime, endTime string, temperature, humidity float64) { sqlStatement := fmt.Sprintf("update z_task_data_%s set t_t = t_t + %f , t_rh = t_rh + %f where t_id = '%s' and t_sn = '%s' and t_time BETWEEN '%s' and '%s'", T_task_id, temperature, humidity, id, sn, startTime, endTime) o := orm.NewOrm() exec, err := o.Raw(sqlStatement).Exec() if err != nil { fmt.Println(err.Error()) } affected, err := exec.RowsAffected() if err != nil { fmt.Println(err.Error()) } fmt.Printf("影响了%d行\n", affected) } // UpdateTaskData 更新设备探头数据 func UpdateTaskData(T_task_id, sn, id string, old, newO TaskData_) { sqlStatement := fmt.Sprintf("update z_task_data_%s set t_t = %f , t_rh = %f where t_sn = '%s' and t_id = '%s' and t_t = %f and t_rh = %f and t_time= '%s'", T_task_id, newO.T_t, newO.T_rh, sn, id, old.T_t, old.T_rh, old.T_time) fmt.Println("执行SQL:", sqlStatement) o := orm.NewOrm() exec, err := o.Raw(sqlStatement).Exec() if err != nil { fmt.Println("执行错误:", sqlStatement, err.Error()) } affected, _ := exec.RowsAffected() fmt.Println("执行更新行数:", affected) } // 查询温度最小值 最大值 func Read_TaskData_T_Min_Max(T_task_id string, SN string, T_id string, Time_start_ string, Time_end_ string) (minT, maxT float64) { o := orm.NewOrm() sql_condition := "" if len(Time_start_) > 1 { sql_condition += " AND t_time >= '" + Time_start_ + "'" } if len(Time_end_) > 1 { sql_condition += " AND t_time <= '" + Time_end_ + "'" } if len(T_id) > 0 { sql_condition += " AND t_id = '" + T_id + "'" } if len(SN) > 0 { sql_condition += " AND t_sn = '" + SN + "'" } if len(sql_condition) > 0 { sql_condition = " WHERE " + strings.TrimLeft(sql_condition, " AND ") } //fmt.Println("maps_z;",maps_z[0][0]) sql := "SELECT MIN(t_t) AS min_t, MAX(t_t) AS max_t FROM z_task_data_" + T_task_id + sql_condition fmt.Println(sql) err := o.Raw(sql).QueryRow(&minT, &maxT) if err != nil { logs.Error(lib.FuncName(), err) } return } // UpdateTaskDataTemperatureAndHumidityByGeometric 更新设备探头数据温湿度 等比缩放 func UpdateTaskDataTemperatureAndHumidityByGeometric(T_task_id, sn, id, startTime, endTime string, temperature, humidity float64) { sqlStatement := fmt.Sprintf("update z_task_data_%s set t_t = t_t * %f , t_rh = t_rh * %f where t_id = '%s' and t_sn = '%s' and t_time BETWEEN '%s' and '%s'", T_task_id, temperature, humidity, id, sn, startTime, endTime) o := orm.NewOrm() exec, err := o.Raw(sqlStatement).Exec() if err != nil { fmt.Println(err.Error()) } affected, err := exec.RowsAffected() if err != nil { fmt.Println(err.Error()) } fmt.Printf("影响了%d行\n", affected) } func Read_TaskData_T_Min_Max_Time_Min_Max(T_task_id string, SN []string, T_id []string, Time_start_ string, Time_end_ string) (minT, maxT float64, minTime, maxTime time.Time) { o := orm.NewOrm() sql_condition := "" if len(Time_start_) > 1 { sql_condition += " AND t_time >= '" + Time_start_ + "'" } if len(Time_end_) > 1 { sql_condition += " AND t_time <= '" + Time_end_ + "'" } if len(T_id) > 0 || len(SN) > 0 { sql_condition += " AND t_id in (" + strings.Join(T_id, ",") + ") OR t_sn in (" + strings.Join(SN, ",") + ")" } if len(sql_condition) > 0 { sql_condition = " WHERE " + strings.TrimLeft(sql_condition, " AND ") } //fmt.Println("maps_z;",maps_z[0][0]) sql := "SELECT MIN(t_t) AS min_t, MAX(t_t) AS max_t,MIN(t_time) AS min_time, MAX(t_time) AS max_time FROM z_task_data_" + T_task_id + sql_condition fmt.Println(sql) err := o.Raw(sql).QueryRow(&minT, &maxT, &minTime, &maxTime) if err != nil { logs.Error(lib.FuncName(), err) } return } // 获取线上设备数据 func Read_DeviceData_T_Min_Max_Time_Min_Max(T_sn, T_id string, Time_start_ string, Time_end_ string) (minT, maxT float64, minTime, maxTime time.Time) { o := orm2.NewOrmUsingDB(conf.Server_AliasName) sql_condition := "" if len(Time_start_) > 1 { sql_condition += " AND t_time >= '" + Time_start_ + "'" } if len(Time_end_) > 1 { sql_condition += " AND t_time <= '" + Time_end_ + "'" } sql_condition += " AND t_id = '" + T_id + "'" if len(sql_condition) > 0 { sql_condition = " WHERE " + strings.TrimLeft(sql_condition, " AND ") } //fmt.Println("maps_z;",maps_z[0][0]) sql := "SELECT MIN(t_t) AS min_t, MAX(t_t) AS max_t,MIN(t_time) AS min_time, MAX(t_time) AS max_time FROM z_devicedata_" + T_sn + sql_condition fmt.Println(sql) err := o.Raw(sql).QueryRow(&minT, &maxT, &minTime, &maxTime) if err != nil { logs.Error(lib.FuncName(), err) } return } func Read_TaskData_ByIds_List(T_task_id string, SN []string, T_id []string, Time_start_ string, Time_end_ string) []TaskData { o := orm.NewOrm() var maps []TaskData sql_condition := "" if len(Time_start_) > 1 { sql_condition += " AND t_time >= '" + Time_start_ + "'" } if len(Time_end_) > 1 { sql_condition += " AND t_time <= '" + Time_end_ + "'" } if len(T_id) > 0 || len(SN) > 0 { sql_condition += " AND (t_id in (" + strings.Join(T_id, ",") + ") OR t_sn in (" + strings.Join(SN, ",") + "))" } if len(sql_condition) > 0 { sql_condition = " WHERE " + strings.TrimLeft(sql_condition, " AND ") } //fmt.Println("maps_z;",maps_z[0][0]) sql := "SELECT ID,t_sn,t_id,t_t,t_rh,t_time FROM z_task_data_" + T_task_id + sql_condition + " ORDER BY t_time" fmt.Println(sql) _, err := o.Raw(sql).QueryRows(&maps) if err != nil { logs.Error(lib.FuncName(), err) } return maps } // 按照 t_id 分组数据 func GroupDataByTID(data []TaskData) map[string][]TaskData { groupedData := make(map[string][]TaskData) for _, d := range data { groupedData[d.T_id] = append(groupedData[d.T_id], d) } return groupedData } // 删除的重复数据 func DeleteDeduplicate(T_task_id string) (int64, error) { localOrm := orm2.NewOrmUsingDB(conf.Local_AliasName) tb_name := "z_task_data_" + T_task_id var cnt int64 // 创建临时表 sqlCreate := "CREATE TABLE `tmp_table` AS (SELECT MIN(`ID`) AS `min_id` FROM " + tb_name + " GROUP BY `t_sn`,`t_id`,`t_time`);" _, err := localOrm.Raw(sqlCreate).Exec() if err != nil { logs.Error(lib.FuncName(), err) return cnt, err } sqlDelete := "DELETE FROM " + tb_name + " WHERE `ID` NOT IN (SELECT `min_id` FROM `tmp_table`);" res, err := localOrm.Raw(sqlDelete).Exec() if err != nil { logs.Error(lib.FuncName(), err) return cnt, err } cnt, _ = res.RowsAffected() sqlDrop := "DROP TABLE `tmp_table`;" _, err = localOrm.Raw(sqlDrop).Exec() if err != nil { logs.Error(lib.FuncName(), err) return cnt, err } return cnt, nil } func CheckTableExist(alias_name, T_task_id string) bool { o := orm2.NewOrmUsingDB(alias_name) sql := "select count(ID) from z_task_data_" + T_task_id _, err := o.Raw(sql).Exec() if err != nil { // 数据库表不存在,则创建数据库 logs.Error(lib.FuncName(), err) if err.(*mysql.MySQLError).Number == 1146 { return false } } return true }