The fantastic ORM library for Golang, aims to be developer friendly. Check http://gorm.io/community.html#Ask before asking.
Hey all, I'm currently trying to do some more complex queries (filtered LEFT JOINs) with gorm. Sadly, it seems I have to build these queries with Raw statements. What puzzles me a bit is that I then have to do some additional Preloading.
So
delegated := []model.Domain{}
err := s.db.Raw("SELECT * FROM domains LEFT JOIN delegations ON delegations.domain_id = domains.id AND delegations.deleted_at IS NULL WHERE (delegations.user = ? OR domains.owner = ?) AND domains.deleted_at IS NULL", owner, owner).Find(&delegated).Error
does not give me the nested list, even though it is actually returned by the query.
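For what it's worth, gorm can usually express that filtered LEFT JOIN without dropping to Raw, which keeps Preload available for the nested list. A minimal sketch, assuming Domain has a Delegations has-many association (the association name is my guess, not from the snippet):
// Filtered LEFT JOIN via a raw join clause, plus Preload for the nested slice.
delegated := []model.Domain{}
err := s.db.
    Joins("LEFT JOIN delegations ON delegations.domain_id = domains.id AND delegations.deleted_at IS NULL").
    Where("delegations.user = ? OR domains.owner = ?", owner, owner).
    Preload("Delegations"). // hydrates the nested list; a Raw query alone won't do this
    Find(&delegated).Error
If Domain uses soft delete, gorm appends domains.deleted_at IS NULL to the WHERE clause on its own.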
type Address struct {
    ID            int64     `json:"id"`
    UserID        User      `gorm:"association_foreignkey:user_id"`
    UserId        int       `gorm:"column:user_id"`
    // Guest users may place an order, so they should be able to create an address with nullable UserId
    StreetAddress string    `json:"street_address"`
    City          string    `json:"city"`
    Country       string    `json:"country"`
    Postal        string    `json:"postal"`
    State         string    `json:"state"`
    CreatedAt     time.Time `json:"created_at"`
    UpdatedAt     time.Time `json:"updated_at"`
    // DeletedAt  time.Time `json:"deleted_at"`
    FirstName     string    `json:"first_name"`
    LastName      string    `json:"last_name"`
    Orders        []Order   `gorm:"foreignKey:AddressId"`
}
func (s Content) FetchAddressesPage(userId int, page int64, pageSize int64, includeUser bool) (address []Address, totalAddressesCount int) {
    address = []Address{}
    fmt.Print("value of the addresses\n", address)
    s.dbc.Model(&address).Where(&Address{UserId: userId}).Count(&totalAddressesCount)
    // s.dbc.Where(&Address{UserId: userId}).
    //     Offset((page - 1) * pageSize).Limit(pageSize).
    //     Preload("UserID"). // doesn't work (tried Joins but got a lookup error)
    //     Find(&address)
    err := s.dbc.Where(&Address{UserId: userId}).
        Offset((page - 1) * pageSize).Limit(pageSize).Preload("UserID").
        // doesn't work (tried Joins but got a lookup error)
        Find(&address)
The user's table
type User struct {
    // json.Model
    ID        int    `json:"id"`
    FirstName string `json:"first_name"`
    LastName  string `json:"last_name"`
    Username  string `json:"username"`
    Email     string `json:"email"`
    Password  string `json:"password"`
    // Comments []Comment `json:"foreignkey:UserId"`
    Comments  []Comment `json:"comment"`
    // Roles []Role `json:"many2many:users_roles;"`
    Roles     []Role `json:"roles"`
    // UserRoles []UserRole `json:"foreignkey:UserId"`
    UserRoles []UserRole `json:"user_roles"`
}
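On the Preload("UserID") lookup error: Preload takes the name of an association field, and here UserID is declared as a struct while the integer foreign key sits in a separately named field, so gorm can't resolve the pair. A minimal sketch of a belongs-to that Preload can find, written against gorm v2 conventions (field and tag names are illustrative, not a confirmed fix):
type Address struct {
    ID     int64 `json:"id"`
    UserId int   `gorm:"column:user_id"`   // make it *int if guest orders need a NULL user
    User   User  `gorm:"foreignKey:UserId"` // belongs-to; preload via this field name
    // ... remaining columns unchanged ...
}

err := s.dbc.Where(&Address{UserId: userId}).
    Offset(int((page-1)*pageSize)).Limit(int(pageSize)).
    Preload("User").
    Find(&address).Error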
Hello! Is it possible to get a time.Time in the timezone of the selected column? I am using gorm with Clickhouse and doing a query of the form:
select date_trunc('day', date, 'Europe/Stockholm') from table_name;
which returns the date truncated correctly; however, gorm returns it in UTC instead of Europe/Stockholm. The time is correct, but it would help a lot to get it in the timezone used in date_trunc :)
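Until someone has a better answer: since the instant is right and only the presentation differs, you can convert the scanned value in Go. A small sketch (the struct, alias, and table names are made up for illustration):
loc, err := time.LoadLocation("Europe/Stockholm")
if err != nil {
    panic(err)
}

var row struct {
    Day time.Time `gorm:"column:day"`
}
db.Raw("SELECT date_trunc('day', date, 'Europe/Stockholm') AS day FROM table_name").Scan(&row)

fmt.Println(row.Day.In(loc)) // same instant, rendered in Europe/Stockholm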
.Save(log) fails with "sql: Scan error on column index 0, name "id": unsupported Scan, storing driver.Value type int64 into type *Log". It seems like .Save() expects a pointer in which to store the Id of the main record, and not a pointer to the NEW Log record to insert. Any ideas?
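Hard to tell from the snippet, but that exact Scan error often shows up when gorm is handed a pointer to a pointer (log is already a *Log and gets passed as &log), so the id column ends up being scanned into a *Log. A sketch of the shapes that normally work (Message is a made-up field):
// Work with a value and pass its address...
entry := Log{Message: "hello"}
if err := db.Save(&entry).Error; err != nil {
    // handle err
}

// ...or, if the variable is already a *Log, pass it as-is rather than &log.
logPtr := &Log{Message: "hello"}
if err := db.Save(logPtr).Error; err != nil {
    // handle err
}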
db.Model(aModel).Joins("ARelation", db.Where(aRelationWithFieldsSet))
yields a LEFT OUTER JOIN against the relation, and the conditions in the Where clause seem to be ignored. I'd like to be able to join only against rows that meet a certain condition.
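One way to get that today is to put the filter into the join condition itself with a raw join clause, so only matching relation rows take part in the join; a sketch with made-up table and column names:
// The condition lives in the ON clause, so non-matching relation rows are excluded.
var results []AModel
err := db.Model(&AModel{}).
    Joins("JOIN a_relations ON a_relations.a_model_id = a_models.id AND a_relations.some_field = ?", wantedValue).
    Find(&results).Error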
type u1 struct {
    gorm.Model
    OrderNo string `json:"orderNo" gorm:"uniqueIndex:idx_order_no;comment:ddd"`
    Name    string `json:"name" comment:"名称" validate:"required"`
    Urs     []ur1  `json:"urs" gorm:"foreignkey:UId;references:ID"`
}
type ur1 struct {
    gorm.Model
    UId  uint   `json:"uId" gorm:"uniqueIndex:idx_uid;comment:ddd"`
    R    r1     `json:"r" gorm:"foreignkey:UrId;references:ID"`
    Desc string `json:"desc"`
}
type r1 struct {
    gorm.Model
    UrId uint   `json:"urId" gorm:"uniqueIndex:idx_urid;comment:ddd"`
    Name string `json:"name" comment:"名称"`
}
func TestEnableMysql21(t *testing.T) {
    db, err := datasource.EnableMysql2(module.MysqlConf{
        Address:         "127.0.0.1:3306",
        Username:        "test2",
        Password:        "123456",
        DbName:          "test2",
        Prefix:          "",
        MaxOpenConns:    1,
        MaxIdleConns:    1,
        ConnMaxLifetime: 10,
        IsLogMode:       true,
        NotReplace:      true,
    })
    fmt.Println(err)
    db.AutoMigrate(
        &u1{},
        &ur1{},
        &r1{},
    )
    u := u1{
        OrderNo: "1",
        Name:    "gy",
        Urs: []ur1{
            {
                R: r1{
                    Name: "r1",
                },
                Desc: "urdesc",
            },
        },
    }
    db.Clauses(clause.OnConflict{UpdateAll: true}).Create(&u)
}
The first time, the SQL is:
INSERT INTO r1 (created_at, updated_at, deleted_at, ur_id, name)
VALUES ('2022-04-13 10:23:48.598', '2022-04-13 10:23:48.598', NULL, 5, 'r1')
ON DUPLICATE KEY UPDATE ur_id = VALUES(ur_id)

INSERT INTO ur1 (created_at, updated_at, deleted_at, uid, desc)
VALUES ('2022-04-13 10:23:48.598', '2022-04-13 10:23:48.598', NULL, 5, 'urdesc')
ON DUPLICATE KEY UPDATE uid = VALUES(uid)

INSERT INTO u1 (created_at, updated_at, deleted_at, order_no, name)
VALUES ('2022-04-13 10:23:48.597', '2022-04-13 10:23:48.597', NULL, '1', 'gy')
ON DUPLICATE KEY UPDATE updated_at = '2022-04-13 10:23:48.597', deleted_at = VALUES(deleted_at), order_no = VALUES(order_no), name = VALUES(name)
but the second time, the SQL upserts r1 with ur_id = 0, which is not what I want:
INSERT INTO r1 (created_at, updated_at, deleted_at, ur_id, name)
VALUES ('2022-04-13 10:24:40.518', '2022-04-13 10:24:40.518', NULL, 0, 'r1')
ON DUPLICATE KEY UPDATE ur_id = VALUES(ur_id)

INSERT INTO ur1 (created_at, updated_at, deleted_at, uid, desc)
VALUES ('2022-04-13 10:24:40.517', '2022-04-13 10:24:40.517', NULL, 5, 'urdesc')
ON DUPLICATE KEY UPDATE uid = VALUES(uid)

INSERT INTO u1 (created_at, updated_at, deleted_at, order_no, name)
VALUES ('2022-04-13 10:24:40.517', '2022-04-13 10:24:40.517', NULL, '1', 'gy')
ON DUPLICATE KEY UPDATE updated_at = '2022-04-13 10:24:40.517', deleted_at = VALUES(deleted_at), order_no = VALUES(order_no), name = VALUES(name)
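Not sure about the root cause, but one workaround sketch (it sidesteps the question rather than answering it): skip the association upserts on the conflicting create so ur1 and r1 are not re-inserted with a zero foreign key, and save them separately if they actually changed:
// Upsert only the u1 row; the Urs association and the nested R are left untouched.
db.Omit(clause.Associations).
    Clauses(clause.OnConflict{UpdateAll: true}).
    Create(&u)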