Support multiple scoring criteria

This commit is contained in:
Ian Gulliver
2021-11-24 19:52:22 -08:00
parent 7c0a1e28e6
commit 1c4cf25e34
5 changed files with 176 additions and 79 deletions

View File

@@ -10,40 +10,40 @@ var opCodes = []vm.OpCodeType{
vm.OpMov, vm.OpMov,
vm.OpAdd, vm.OpAdd,
vm.OpSub, // vm.OpSub,
vm.OpMul, vm.OpMul,
vm.OpDivU, // vm.OpDivU,
vm.OpDivS, // vm.OpDivS,
vm.OpNot, // vm.OpNot,
vm.OpAnd, // vm.OpAnd,
vm.OpOr, // vm.OpOr,
vm.OpXor, vm.OpXor,
vm.OpShR, // vm.OpShR,
vm.OpShL, // vm.OpShL,
vm.OpEq, // vm.OpEq,
vm.OpLTU, // vm.OpLTU,
vm.OpLTS, // vm.OpLTS,
vm.OpGTU, // vm.OpGTU,
vm.OpGTS, // vm.OpGTS,
vm.OpLTEU, // vm.OpLTEU,
vm.OpLTES, // vm.OpLTES,
vm.OpGTEU, // vm.OpGTEU,
vm.OpGTES, // vm.OpGTES,
vm.OpJmp, // vm.OpJmp,
vm.OpJmpT, // vm.OpJmpT,
vm.OpJmpF, // vm.OpJmpF,
vm.OpCal, // vm.OpCal,
vm.OpCalT, // vm.OpCalT,
vm.OpCalF, // vm.OpCalF,
vm.OpRet, // vm.OpRet,
vm.OpRetT, // vm.OpRetT,
vm.OpRetF, // vm.OpRetF,
vm.OpSqrt, vm.OpSqrt,
} }

View File

@@ -1,9 +1,11 @@
package main package main
import "flag" import "flag"
import "fmt"
import "log" import "log"
import "math/rand" import "math/rand"
import "os" import "os"
import "strings"
import "time" import "time"
import "github.com/firestuff/subcoding/asm" import "github.com/firestuff/subcoding/asm"
@@ -54,6 +56,16 @@ func main() {
log.Fatal(err) log.Fatal(err)
} }
log.Printf("New best score %d / %d (after %d attempts):\n%s", status.BestScore, status.TargetScore, status.Attempts, src) log.Printf("New best score [%s] after %d attempts:\n%s", scoreString(status.BestScores), status.Attempts, src)
} }
} }
func scoreString(scores []*grow.Score) string {
strs := []string{}
for _, score := range scores {
strs = append(strs, fmt.Sprintf("%d / %d", score.Current, score.Total))
}
return strings.Join(strs, ", ")
}

View File

@@ -1,6 +1,7 @@
package grow package grow
import "io" import "io"
import "sort"
import "gopkg.in/yaml.v2" import "gopkg.in/yaml.v2"
@@ -15,6 +16,14 @@ type Definition struct {
InstructionsPerFunctionStdDev uint64 `yaml:"instructions_per_function_std_dev"` InstructionsPerFunctionStdDev uint64 `yaml:"instructions_per_function_std_dev"`
Samples []*Sample `yaml:"samples"` Samples []*Sample `yaml:"samples"`
// Sample indices ranked by each output dimension
SampleRanks [][]int
}
type Score struct {
Current uint64
Total uint64
} }
func NewDefinition(r io.Reader) (*Definition, error) { func NewDefinition(r io.Reader) (*Definition, error) {
@@ -28,14 +37,20 @@ func NewDefinition(r io.Reader) (*Definition, error) {
return nil, err return nil, err
} }
// TODO: Test & handle inconsistent In and Out dimensions
def.buildSampleRanks()
return def, nil return def, nil
} }
func (def *Definition) Grow(statusChan chan<- Status) (*vm.Program, error) { func (def *Definition) Grow(statusChan chan<- Status) (*vm.Program, error) {
status := Status{ if statusChan != nil {
TargetScore: def.sumOuts(), defer close(statusChan)
} }
status := Status{}
if statusChan != nil { if statusChan != nil {
statusChan <- status statusChan <- status
} }
@@ -54,70 +69,153 @@ func (def *Definition) Grow(statusChan chan<- Status) (*vm.Program, error) {
Mutate(def, prog) Mutate(def, prog)
score, err := def.score(prog) scores, err := def.score(prog)
if err != nil { if err != nil {
// Can never get best score // Can never get best score
continue continue
} }
if score > status.BestScore { if !def.scoreIsBetter(status.BestScores, scores) {
err = def.minifyProgram(prog) continue
if err != nil {
if statusChan != nil {
close(statusChan)
} }
err = def.minifyProgram(prog)
if err != nil {
return nil, err return nil, err
} }
status.BestScore = score status.BestScores = scores
status.BestProgram = prog.Copy() status.BestProgram = prog.Copy()
if statusChan != nil { if statusChan != nil {
statusChan <- status statusChan <- status
} }
if status.BestScore == status.TargetScore { if status.BestScores[0].Current == status.BestScores[0].Total {
if statusChan != nil {
close(statusChan)
}
return prog, nil return prog, nil
} }
} }
}
} }
func (def *Definition) score(prog *vm.Program) (uint64, error) { func (def *Definition) buildSampleRanks() {
score := uint64(0) for col := 0; col < len(def.Samples[0].Out); col++ {
rank := []int{}
for i := 0; i < len(def.Samples); i++ {
rank = append(rank, i)
}
sort.SliceStable(rank, func(i, j int) bool {
return def.Samples[i].Out[col] < def.Samples[j].Out[col]
})
def.SampleRanks = append(def.SampleRanks, rank)
}
}
func (def *Definition) score(prog *vm.Program) ([]*Score, error) {
outputs := [][]uint64{}
for _, sample := range def.Samples { for _, sample := range def.Samples {
state, err := vm.NewState(prog) state, err := vm.NewState(prog)
if err != nil { if err != nil {
return 0, err return nil, err
} }
sample.SetInputs(state) sample.SetInputs(state)
err = state.Execute() err = state.Execute()
if err != nil { if err != nil {
return 0, err return nil, err
} }
score += sample.matchingOuts(state) output := []uint64{}
for i := 0; i < len(def.Samples[0].Out); i++ {
// TODO: Handle signedness?
output = append(output, state.GlobalMemory().MustReadUnsigned(uint64(i)))
}
outputs = append(outputs, output)
} }
return score, nil // TODO: Points for proximity to target values?
// TODO: Points for correlation coefficient with target values across samples?
return []*Score{
def.scoreMatching(outputs),
def.scoreRank(outputs),
}, nil
} }
func (def *Definition) sumOuts() uint64 { func (def *Definition) scoreMatching(outputs [][]uint64) *Score {
sum := uint64(0) ret := &Score{}
for _, sample := range def.Samples { for s, sample := range def.Samples {
sum += uint64(len(sample.Out)) for o, out := range sample.Out {
ret.Total++
if outputs[s][o] == out {
ret.Current++
}
}
} }
return sum return ret
}
func (def *Definition) scoreRank(outputs [][]uint64) *Score {
ranks := [][]int{}
for col := 0; col < len(outputs[0]); col++ {
rank := []int{}
for i := 0; i < len(def.Samples); i++ {
rank = append(rank, i)
}
sort.SliceStable(rank, func(i, j int) bool {
return outputs[i][col] < outputs[j][col]
})
ranks = append(ranks, rank)
}
ret := &Score{}
for col, vals := range ranks {
for i, val := range vals {
ret.Total++
if val == def.SampleRanks[col][i] {
ret.Current++
}
}
}
return ret
}
func (def *Definition) scoreIsBetter(old, new []*Score) bool {
if old == nil {
return true
}
for i, score := range new {
best := old[i]
switch {
case score.Current == best.Current:
continue
case score.Current > best.Current:
return true
case score.Current < best.Current:
return false
}
}
// Unchanged
return false
} }
func (def *Definition) minifyProgram(prog *vm.Program) error { func (def *Definition) minifyProgram(prog *vm.Program) error {
@@ -132,7 +230,7 @@ func (def *Definition) minifyProgram(prog *vm.Program) error {
} }
func (def *Definition) minifyFunction(prog *vm.Program, f int) error { func (def *Definition) minifyFunction(prog *vm.Program, f int) error {
baseScore, err := def.score(prog) baseScores, err := def.score(prog)
if err != nil { if err != nil {
return err return err
} }
@@ -147,8 +245,9 @@ func (def *Definition) minifyFunction(prog *vm.Program, f int) error {
copy(tmp, prog.Functions[f].Instructions) copy(tmp, prog.Functions[f].Instructions)
prog.Functions[f].Instructions = append(tmp[:i], tmp[i+1:]...) prog.Functions[f].Instructions = append(tmp[:i], tmp[i+1:]...)
newScore, err := def.score(prog) newScores, err := def.score(prog)
if err == nil && newScore >= baseScore { // XXX: Use all scores
if err == nil && newScores[0].Current >= baseScores[0].Current {
loop = true loop = true
break break
} else { } else {

View File

@@ -12,15 +12,3 @@ func (s *Sample) SetInputs(state *vm.State) {
state.GlobalMemory().WriteUnsigned(uint64(i), val) state.GlobalMemory().WriteUnsigned(uint64(i), val)
} }
} }
func (s *Sample) matchingOuts(state *vm.State) uint64 {
ret := uint64(0)
for i, val := range s.Out {
if state.GlobalMemory().MustReadUnsigned(uint64(i)) == val {
ret++
}
}
return ret
}

View File

@@ -3,10 +3,8 @@ package grow
import "github.com/firestuff/subcoding/vm" import "github.com/firestuff/subcoding/vm"
type Status struct { type Status struct {
TargetScore uint64
Attempts uint64 Attempts uint64
BestScore uint64 BestScores []*Score
BestProgram *vm.Program BestProgram *vm.Program
} }