Best Syzkaller code snippet using main.collectUsedFiles
Source: manager.go
...
		log.Fatalf("ACHyb: failed to reopen corpus database: %v", err)
	}
	// Create HTTP server.
	mgr.initHTTP()
	mgr.collectUsedFiles()
	// Create RPC server for fuzzers.
	mgr.port, err = startRPCServer(mgr)
	if err != nil {
		log.Fatalf("failed to create rpc server: %v", err)
	}
	if cfg.DashboardAddr != "" {
		mgr.dash = dashapi.New(cfg.DashboardClient, cfg.DashboardAddr, cfg.DashboardKey)
	}
	go func() {
		for lastTime := time.Now(); ; {
			time.Sleep(10 * time.Second)
			now := time.Now()
			diff := now.Sub(lastTime)
			lastTime = now
			mgr.mu.Lock()
			if mgr.firstConnect.IsZero() {
				mgr.mu.Unlock()
				continue
			}
			mgr.fuzzingTime += diff * time.Duration(atomic.LoadUint32(&mgr.numFuzzing))
			executed := mgr.stats.execTotal.get()
			crashes := mgr.stats.crashes.get()
			corpusCover := mgr.stats.corpusCover.get()
			corpusSignal := mgr.stats.corpusSignal.get()
			maxSignal := mgr.stats.maxSignal.get()
			mgr.mu.Unlock()
			numReproducing := atomic.LoadUint32(&mgr.numReproducing)
			numFuzzing := atomic.LoadUint32(&mgr.numFuzzing)
			log.Logf(0, "VMs %v, executed %v, corpus cover %v, corpus signal %v, max signal %v, crashes %v, repro %v",
				numFuzzing, executed, corpusCover, corpusSignal, maxSignal, crashes, numReproducing)
		}
	}()
	if *flagBench != "" {
		f, err := os.OpenFile(*flagBench, os.O_WRONLY|os.O_CREATE|os.O_EXCL, osutil.DefaultFilePerm)
		if err != nil {
			log.Fatalf("failed to open bench file: %v", err)
		}
		go func() {
			for {
				time.Sleep(time.Minute)
				vals := mgr.stats.all()
				mgr.mu.Lock()
				if mgr.firstConnect.IsZero() {
					mgr.mu.Unlock()
					continue
				}
				mgr.minimizeCorpus()
				vals["corpus"] = uint64(len(mgr.corpus))
				vals["uptime"] = uint64(time.Since(mgr.firstConnect)) / 1e9
				vals["fuzzing"] = uint64(mgr.fuzzingTime) / 1e9
				mgr.mu.Unlock()
				data, err := json.MarshalIndent(vals, "", "  ")
				if err != nil {
					log.Fatalf("failed to serialize bench data")
				}
				if _, err := f.Write(append(data, '\n')); err != nil {
					log.Fatalf("failed to write bench data")
				}
			}
		}()
	}
	if mgr.dash != nil {
		go mgr.dashboardReporter()
	}
	osutil.HandleInterrupts(vm.Shutdown)
	if mgr.vmPool == nil {
		log.Logf(0, "no VMs started (type=none)")
		log.Logf(0, "you are supposed to start syz-fuzzer manually as:")
		log.Logf(0, "syz-fuzzer -manager=manager.ip:%v [other flags as necessary]", mgr.port)
		<-vm.Shutdown
		return
	}
	mgr.vmLoop()
}

type RunResult struct {
	idx   int
	crash *Crash
	err   error
}

type ReproResult struct {
	instances []int
	report0   *report.Report // the original report we started reproducing
	res       *repro.Result
	stats     *repro.Stats
	err       error
	hub       bool // repro came from hub
}

// Manager needs to be refactored (#605).
// nolint: gocyclo, gocognit, funlen
func (mgr *Manager) vmLoop() {
	log.Logf(0, "booting test machines...")
	log.Logf(0, "wait for the connection from test machine...")
	instancesPerRepro := 4
	vmCount := mgr.vmPool.Count()
	if instancesPerRepro > vmCount {
		instancesPerRepro = vmCount
	}
	bootInstance := make(chan int)
	go func() {
		for i := 0; i < vmCount; i++ {
			bootInstance <- i
			time.Sleep(10 * time.Second)
		}
	}()
	var instances []int
	runDone := make(chan *RunResult, 1)
	pendingRepro := make(map[*Crash]bool)
	reproducing := make(map[string]bool)
	reproInstances := 0
	var reproQueue []*Crash
	reproDone := make(chan *ReproResult, 1)
	stopPending := false
	shutdown := vm.Shutdown
	for shutdown != nil || len(instances) != vmCount {
		mgr.mu.Lock()
		phase := mgr.phase
		mgr.mu.Unlock()
		for crash := range pendingRepro {
			if reproducing[crash.Title] {
				continue
			}
			delete(pendingRepro, crash)
			if !mgr.needRepro(crash) {
				continue
			}
			log.Logf(1, "loop: add to repro queue '%v'", crash.Title)
			reproducing[crash.Title] = true
			reproQueue = append(reproQueue, crash)
		}
		log.Logf(1, "loop: phase=%v shutdown=%v instances=%v/%v %+v repro: pending=%v reproducing=%v queued=%v",
			phase, shutdown == nil, len(instances), vmCount, instances,
			len(pendingRepro), len(reproducing), len(reproQueue))
		canRepro := func() bool {
			return phase >= phaseTriagedHub &&
				len(reproQueue) != 0 && reproInstances+instancesPerRepro <= vmCount
		}
		if shutdown != nil {
			for canRepro() && len(instances) >= instancesPerRepro {
				last := len(reproQueue) - 1
				crash := reproQueue[last]
				reproQueue[last] = nil
				reproQueue = reproQueue[:last]
				vmIndexes := append([]int{}, instances[len(instances)-instancesPerRepro:]...)
				instances = instances[:len(instances)-instancesPerRepro]
				reproInstances += instancesPerRepro
				atomic.AddUint32(&mgr.numReproducing, 1)
				log.Logf(1, "loop: starting repro of '%v' on instances %+v", crash.Title, vmIndexes)
				go func() {
					features := mgr.checkResult.Features
					res, stats, err := repro.Run(crash.Output, mgr.cfg, features, mgr.reporter, mgr.vmPool, vmIndexes)
					reproDone <- &ReproResult{
						instances: vmIndexes,
						report0:   crash.Report,
						res:       res,
						stats:     stats,
						err:       err,
						hub:       crash.hub,
					}
				}()
			}
			for !canRepro() && len(instances) != 0 {
				last := len(instances) - 1
				idx := instances[last]
				instances = instances[:last]
				log.Logf(1, "loop: starting instance %v", idx)
				go func() {
					crash, err := mgr.runInstance(idx)
					runDone <- &RunResult{idx, crash, err}
				}()
			}
		}
		var stopRequest chan bool
		if !stopPending && canRepro() {
			stopRequest = mgr.vmStop
		}
	wait:
		select {
		case idx := <-bootInstance:
			instances = append(instances, idx)
		case stopRequest <- true:
			log.Logf(1, "loop: issued stop request")
			stopPending = true
		case res := <-runDone:
			log.Logf(1, "loop: instance %v finished, crash=%v", res.idx, res.crash != nil)
			if res.err != nil && shutdown != nil {
				log.Logf(0, "%v", res.err)
			}
			stopPending = false
			instances = append(instances, res.idx)
			// On shutdown qemu crashes with "qemu: terminating on signal 2",
			// which we detect as "lost connection". Don't save that as a crash.
			if shutdown != nil && res.crash != nil {
				needRepro := mgr.saveCrash(res.crash)
				if needRepro {
					log.Logf(1, "loop: add pending repro for '%v'", res.crash.Title)
					pendingRepro[res.crash] = true
				}
			}
		case res := <-reproDone:
			atomic.AddUint32(&mgr.numReproducing, ^uint32(0))
			crepro := false
			title := ""
			if res.res != nil {
				crepro = res.res.CRepro
				title = res.res.Report.Title
			}
			log.Logf(1, "loop: repro on %+v finished '%v', repro=%v crepro=%v desc='%v'",
				res.instances, res.report0.Title, res.res != nil, crepro, title)
			if res.err != nil {
				log.Logf(0, "repro failed: %v", res.err)
			}
			delete(reproducing, res.report0.Title)
			instances = append(instances, res.instances...)
			reproInstances -= instancesPerRepro
			if res.res == nil {
				if !res.hub {
					mgr.saveFailedRepro(res.report0, res.stats)
				}
			} else {
				mgr.saveRepro(res.res, res.stats, res.hub)
			}
		case <-shutdown:
			log.Logf(1, "loop: shutting down...")
			shutdown = nil
		case crash := <-mgr.hubReproQueue:
			log.Logf(1, "loop: get repro from hub")
			pendingRepro[crash] = true
		case reply := <-mgr.needMoreRepros:
			reply <- phase >= phaseTriagedHub &&
				len(reproQueue)+len(pendingRepro)+len(reproducing) == 0
			goto wait
		case reply := <-mgr.reproRequest:
			repros := make(map[string]bool)
			for title := range reproducing {
				repros[title] = true
			}
			reply <- repros
			goto wait
		}
	}
}

func (mgr *Manager) loadCorpus() {
	// By default we don't re-minimize/re-smash programs from the corpus,
	// it takes lots of time on start and is unnecessary.
	// However, on version bumps we can selectively re-minimize/re-smash.
	minimized, smashed := true, true
	switch mgr.corpusDB.Version {
	case 0:
		// Version 0 had broken minimization, so we need to re-minimize.
		minimized = false
		fallthrough
	case 1:
		// Version 1->2: memory is preallocated so lots of mmaps become unnecessary.
		minimized = false
		fallthrough
	case 2:
		// Version 2->3: big-endian hints.
		smashed = false
		fallthrough
	case 3:
		// Version 3->4: to shake things up.
		minimized = false
		fallthrough
	case currentDBVersion:
	}
	broken := 0
	for key, rec := range mgr.corpusDB.Records {
		bad, disabled := checkProgram(mgr.target, mgr.targetEnabledSyscalls, rec.Val)
		if bad {
			mgr.corpusDB.Delete(key)
			broken++
			continue
		}
		if disabled {
			// This program contains a disabled syscall.
			// We won't execute it, but remember its hash so
			// it is not deleted during minimization.
			mgr.disabledHashes[hash.String(rec.Val)] = struct{}{}
			continue
		}
		mgr.candidates = append(mgr.candidates, rpctype.RPCCandidate{
			Prog:      rec.Val,
			Minimized: minimized,
			Smashed:   smashed,
		})
	}
	mgr.fresh = len(mgr.corpusDB.Records) == 0
	log.Logf(0, "%-24v: %v", "corpus", len(mgr.candidates))
	// Now this is ugly.
	// We duplicate all inputs in the corpus and shuffle the second part.
	// This solves the following problem. A fuzzer can crash while triaging candidates,
	// in which case it will also lose all cached candidates. Or, an input can be somewhat flaky
	// and not give the coverage on the first try. So we give each input a second chance.
	// Shuffling should alleviate deterministically losing the same inputs on fuzzer crashes.
	mgr.candidates = append(mgr.candidates, mgr.candidates...)
	shuffle := mgr.candidates[len(mgr.candidates)/2:]
	rand.Shuffle(len(shuffle), func(i, j int) {
		shuffle[i], shuffle[j] = shuffle[j], shuffle[i]
	})
	if mgr.phase != phaseInit {
		panic(fmt.Sprintf("loadCorpus: bad phase %v", mgr.phase))
	}
	mgr.phase = phaseLoadedCorpus
}

func checkProgram(target *prog.Target, enabled map[*prog.Syscall]bool, data []byte) (bad, disabled bool) {
	p, err := target.Deserialize(data, prog.NonStrict)
	if err != nil {
		return true, true
	}
	if len(p.Calls) > prog.MaxCalls {
		return true, true
	}
	for _, c := range p.Calls {
		if !enabled[c.Meta] {
			return false, true
		}
	}
	return false, false
}

func (mgr *Manager) runInstance(index int) (*Crash, error) {
	mgr.checkUsedFiles()
	inst, err := mgr.vmPool.Create(index)
	if err != nil {
		return nil, fmt.Errorf("failed to create instance: %v", err)
	}
	defer inst.Close()
	fwdAddr, err := inst.Forward(mgr.port)
	if err != nil {
		return nil, fmt.Errorf("failed to setup port forwarding: %v", err)
	}
	fuzzerBin, err := inst.Copy(mgr.cfg.SyzFuzzerBin)
	if err != nil {
		return nil, fmt.Errorf("failed to copy binary: %v", err)
	}
	// If SyzExecutorCmd is provided, it means that syz-executor is already in
	// the image, so no need to copy it.
	executorCmd := targets.Get(mgr.cfg.TargetOS, mgr.cfg.TargetArch).SyzExecutorCmd
	if executorCmd == "" {
		executorCmd, err = inst.Copy(mgr.cfg.SyzExecutorBin)
		if err != nil {
			return nil, fmt.Errorf("failed to copy binary: %v", err)
		}
	}
	fuzzerV := 0
	procs := mgr.cfg.Procs
	if *flagDebug {
		fuzzerV = 100
		procs = 1
	}
	// Run the fuzzer binary.
	start := time.Now()
	atomic.AddUint32(&mgr.numFuzzing, 1)
	defer atomic.AddUint32(&mgr.numFuzzing, ^uint32(0))
	cmd := instance.FuzzerCmd(fuzzerBin, executorCmd, fmt.Sprintf("vm-%v", index),
		mgr.cfg.TargetOS, mgr.cfg.TargetArch, fwdAddr, mgr.cfg.Sandbox, procs, fuzzerV,
		mgr.cfg.Cover, *flagDebug, false, false)
	outc, errc, err := inst.Run(time.Hour, mgr.vmStop, cmd)
	if err != nil {
		return nil, fmt.Errorf("failed to run fuzzer: %v", err)
	}
	rep := inst.MonitorExecution(outc, errc, mgr.reporter, vm.ExitTimeout)
	if rep == nil {
		// This is the only "OK" outcome.
		log.Logf(0, "vm-%v: running for %v, restarting", index, time.Since(start))
		return nil, nil
	}
	crash := &Crash{
		vmIndex: index,
		hub:     false,
		Report:  rep,
	}
	return crash, nil
}

func (mgr *Manager) emailCrash(crash *Crash) {
	if len(mgr.cfg.EmailAddrs) == 0 {
		return
	}
	args := []string{"-s", "syzkaller: " + crash.Title}
	args = append(args, mgr.cfg.EmailAddrs...)
	log.Logf(0, "sending email to %v", mgr.cfg.EmailAddrs)
	cmd := exec.Command("mailx", args...)
	cmd.Stdin = bytes.NewReader(crash.Report.Report)
	if _, err := osutil.Run(10*time.Minute, cmd); err != nil {
		log.Logf(0, "failed to send email: %v", err)
	}
}

func (mgr *Manager) saveCrash(crash *Crash) bool {
	if crash.Type == report.MemoryLeak {
		mgr.mu.Lock()
		mgr.memoryLeakFrames[crash.Frame] = true
		mgr.mu.Unlock()
	}
	if crash.Type == report.DataRace {
		mgr.mu.Lock()
		mgr.dataRaceFrames[crash.Frame] = true
		mgr.mu.Unlock()
	}
	if crash.Suppressed {
		log.Logf(0, "vm-%v: suppressed crash %v", crash.vmIndex, crash.Title)
		mgr.stats.crashSuppressed.inc()
		return false
	}
	corrupted := ""
	if crash.Corrupted {
		corrupted = " [corrupted]"
	}
	log.Logf(0, "vm-%v: crash: %v%v", crash.vmIndex, crash.Title, corrupted)
	if err := mgr.reporter.Symbolize(crash.Report); err != nil {
		log.Logf(0, "failed to symbolize report: %v", err)
	}
	mgr.stats.crashes.inc()
	mgr.mu.Lock()
	if !mgr.crashTypes[crash.Title] {
		mgr.crashTypes[crash.Title] = true
		mgr.stats.crashTypes.inc()
	}
	mgr.mu.Unlock()
	if mgr.dash != nil {
		if crash.Type == report.MemoryLeak {
			return true
		}
		dc := &dashapi.Crash{
			BuildID:     mgr.cfg.Tag,
			Title:       crash.Title,
			Corrupted:   crash.Corrupted,
			Maintainers: crash.Maintainers,
			Log:         crash.Output,
			Report:      crash.Report.Report,
		}
		resp, err := mgr.dash.ReportCrash(dc)
		if err != nil {
			log.Logf(0, "failed to report crash to dashboard: %v", err)
		} else {
			// Don't store the crash locally if we've successfully
			// uploaded it to the dashboard. It would just eat disk space.
			return resp.NeedRepro
		}
	}
	sig := hash.Hash([]byte(crash.Title))
	id := sig.String()
	dir := filepath.Join(mgr.crashdir, id)
	osutil.MkdirAll(dir)
	if err := osutil.WriteFile(filepath.Join(dir, "description"), []byte(crash.Title+"\n")); err != nil {
		log.Logf(0, "failed to write crash: %v", err)
	}
	// Save up to 100 reports. If we already have 100, overwrite the oldest one.
	// Newer reports are generally more useful. Overwriting is also needed
	// to be able to understand if a particular bug still happens or is already fixed.
	oldestI := 0
	var oldestTime time.Time
	for i := 0; i < 100; i++ {
		info, err := os.Stat(filepath.Join(dir, fmt.Sprintf("log%v", i)))
		if err != nil {
			oldestI = i
			if i == 0 {
				go mgr.emailCrash(crash)
			}
			break
		}
		if oldestTime.IsZero() || info.ModTime().Before(oldestTime) {
			oldestI = i
			oldestTime = info.ModTime()
		}
	}
	osutil.WriteFile(filepath.Join(dir, fmt.Sprintf("log%v", oldestI)), crash.Output)
	if len(mgr.cfg.Tag) > 0 {
		osutil.WriteFile(filepath.Join(dir, fmt.Sprintf("tag%v", oldestI)), []byte(mgr.cfg.Tag))
	}
	if len(crash.Report.Report) > 0 {
		osutil.WriteFile(filepath.Join(dir, fmt.Sprintf("report%v", oldestI)), crash.Report.Report)
	}
	return mgr.needLocalRepro(crash)
}

const maxReproAttempts = 3

func (mgr *Manager) needLocalRepro(crash *Crash) bool {
	if !mgr.cfg.Reproduce || crash.Corrupted {
		return false
	}
	if mgr.checkResult == nil || (mgr.checkResult.Features[host.FeatureLeak].Enabled &&
		crash.Type != report.MemoryLeak) {
		// Leak checking is very slow, don't bother reproducing other crashes.
		return false
	}
	sig := hash.Hash([]byte(crash.Title))
	dir := filepath.Join(mgr.crashdir, sig.String())
	if osutil.IsExist(filepath.Join(dir, "repro.prog")) {
		return false
	}
	for i := 0; i < maxReproAttempts; i++ {
		if !osutil.IsExist(filepath.Join(dir, fmt.Sprintf("repro%v", i))) {
			return true
		}
	}
	return false
}

func (mgr *Manager) needRepro(crash *Crash) bool {
	if crash.hub {
		return true
	}
	if mgr.dash == nil {
		return mgr.needLocalRepro(crash)
	}
	if crash.Type == report.MemoryLeak {
		return true
	}
	cid := &dashapi.CrashID{
		BuildID:   mgr.cfg.Tag,
		Title:     crash.Title,
		Corrupted: crash.Corrupted,
	}
	needRepro, err := mgr.dash.NeedRepro(cid)
	if err != nil {
		log.Logf(0, "dashboard.NeedRepro failed: %v", err)
	}
	return needRepro
}

func (mgr *Manager) saveFailedRepro(rep *report.Report, stats *repro.Stats) {
	if rep.Type == report.MemoryLeak {
		// Don't send failed leak repro attempts to the dashboard
		// as we did not send the crash itself.
		return
	}
	if mgr.dash != nil {
		cid := &dashapi.CrashID{
			BuildID: mgr.cfg.Tag,
			Title:   rep.Title,
		}
		if err := mgr.dash.ReportFailedRepro(cid); err != nil {
			log.Logf(0, "failed to report failed repro to dashboard: %v", err)
		} else {
			return
		}
	}
	dir := filepath.Join(mgr.crashdir, hash.String([]byte(rep.Title)))
	osutil.MkdirAll(dir)
	for i := 0; i < maxReproAttempts; i++ {
		name := filepath.Join(dir, fmt.Sprintf("repro%v", i))
		if !osutil.IsExist(name) {
			saveReproStats(name, stats)
			break
		}
	}
}

func (mgr *Manager) saveRepro(res *repro.Result, stats *repro.Stats, hub bool) {
	rep := res.Report
	if err := mgr.reporter.Symbolize(rep); err != nil {
		log.Logf(0, "failed to symbolize repro: %v", err)
	}
	opts := fmt.Sprintf("# %+v\n", res.Opts)
	prog := res.Prog.Serialize()
	// Append this repro to the repro list to send to hub if it didn't come from hub originally.
	if !hub {
		progForHub := []byte(fmt.Sprintf("# %+v\n# %v\n# %v\n%s",
			res.Opts, res.Report.Title, mgr.cfg.Tag, prog))
		mgr.mu.Lock()
		mgr.newRepros = append(mgr.newRepros, progForHub)
		mgr.mu.Unlock()
	}
	var cprogText []byte
	if res.CRepro {
		cprog, err := csource.Write(res.Prog, res.Opts)
		if err == nil {
			formatted, err := csource.Format(cprog)
			if err == nil {
				cprog = formatted
			}
			cprogText = cprog
		} else {
			log.Logf(0, "failed to write C source: %v", err)
		}
	}
	if mgr.dash != nil {
		// Note: we intentionally don't set Corrupted for reproducers:
		// 1. This is reproducible, so it can be debugged even with a corrupted report.
		// 2. The repro was re-tried 3 times and still got a corrupted report at the end,
		//    so maybe corrupted report detection is broken.
		// 3. Reproduction is expensive, so it's good to persist the result.
		dc := &dashapi.Crash{
			BuildID:     mgr.cfg.Tag,
			Title:       res.Report.Title,
			Maintainers: res.Report.Maintainers,
			Log:         res.Report.Output,
			Report:      res.Report.Report,
			ReproOpts:   res.Opts.Serialize(),
			ReproSyz:    res.Prog.Serialize(),
			ReproC:      cprogText,
		}
		if _, err := mgr.dash.ReportCrash(dc); err != nil {
			log.Logf(0, "failed to report repro to dashboard: %v", err)
		} else {
			// Don't store the crash locally if we've successfully
			// uploaded it to the dashboard. It would just eat disk space.
			return
		}
	}
	dir := filepath.Join(mgr.crashdir, hash.String([]byte(rep.Title)))
	osutil.MkdirAll(dir)
	if err := osutil.WriteFile(filepath.Join(dir, "description"), []byte(rep.Title+"\n")); err != nil {
		log.Logf(0, "failed to write crash: %v", err)
	}
	osutil.WriteFile(filepath.Join(dir, "repro.prog"), append([]byte(opts), prog...))
	if len(mgr.cfg.Tag) > 0 {
		osutil.WriteFile(filepath.Join(dir, "repro.tag"), []byte(mgr.cfg.Tag))
	}
	if len(rep.Output) > 0 {
		osutil.WriteFile(filepath.Join(dir, "repro.log"), rep.Output)
	}
	if len(rep.Report) > 0 {
		osutil.WriteFile(filepath.Join(dir, "repro.report"), rep.Report)
	}
	if len(cprogText) > 0 {
		osutil.WriteFile(filepath.Join(dir, "repro.cprog"), cprogText)
	}
	saveReproStats(filepath.Join(dir, "repro.stats"), stats)
}

func saveReproStats(filename string, stats *repro.Stats) {
	text := ""
	if stats != nil {
		text = fmt.Sprintf("Extracting prog: %v\nMinimizing prog: %v\n"+
			"Simplifying prog options: %v\nExtracting C: %v\nSimplifying C: %v\n\n\n%s",
			stats.ExtractProgTime, stats.MinimizeProgTime,
			stats.SimplifyProgTime, stats.ExtractCTime, stats.SimplifyCTime, stats.Log)
	}
	osutil.WriteFile(filename, []byte(text))
}

func (mgr *Manager) getMinimizedCorpus() (corpus, repros [][]byte) {
	mgr.mu.Lock()
	defer mgr.mu.Unlock()
	mgr.minimizeCorpus()
	corpus = make([][]byte, 0, len(mgr.corpus))
	for _, inp := range mgr.corpus {
		corpus = append(corpus, inp.Prog)
	}
	repros = mgr.newRepros
	mgr.newRepros = nil
	return
}

func (mgr *Manager) addNewCandidates(progs [][]byte) {
	candidates := make([]rpctype.RPCCandidate, len(progs))
	for i, inp := range progs {
		candidates[i] = rpctype.RPCCandidate{
			Prog:      inp,
			Minimized: false, // don't trust programs from hub
			Smashed:   false,
		}
	}
	mgr.mu.Lock()
	defer mgr.mu.Unlock()
	mgr.candidates = append(mgr.candidates, candidates...)
	if mgr.phase == phaseTriagedCorpus {
		mgr.phase = phaseQueriedHub
	}
}

func (mgr *Manager) minimizeCorpus() {
	if mgr.phase < phaseLoadedCorpus || len(mgr.corpus) <= mgr.lastMinCorpus*103/100 {
		return
	}
	inputs := make([]signal.Context, 0, len(mgr.corpus))
	for _, inp := range mgr.corpus {
		inputs = append(inputs, signal.Context{
			Signal:  inp.Signal.Deserialize(),
			Context: inp,
		})
	}
	newCorpus := make(map[string]rpctype.RPCInput)
	// Note: inputs are unsorted (based on map iteration).
	// This gives some intentional non-determinism during minimization.
	for _, ctx := range signal.Minimize(inputs) {
		inp := ctx.(rpctype.RPCInput)
		newCorpus[hash.String(inp.Prog)] = inp
	}
	log.Logf(1, "minimized corpus: %v -> %v", len(mgr.corpus), len(newCorpus))
	mgr.corpus = newCorpus
	mgr.lastMinCorpus = len(newCorpus)
	// From time to time we get corpus explosion for various reasons:
	// generic bugs, per-OS bugs, problems with fallback coverage, kcov bugs, etc.
	// This has a bad effect on the instance and especially on instances
	// connected via hub. Do some per-syscall sanity checking to prevent this.
	for call, info := range mgr.collectSyscallInfoUnlocked() {
		if mgr.cfg.Cover {
			// If we have fewer than 1K inputs for this call,
			// accept all new inputs unconditionally.
			if info.count < 1000 {
				continue
			}
			// If we have more than 3K already, don't accept any more.
			// Between 1K and 3K, look at the amount of coverage we are getting from these programs.
			// Empirically, real coverage for the most saturated syscalls is ~30-60
			// per program (even when we have a thousand of them). For the explosion
			// case, coverage tends to be much lower (~0.3-5 per program).
			if info.count < 3000 && len(info.cov)/info.count >= 10 {
				continue
			}
		} else {
			// If we don't have real coverage, signal is weak.
			// If we have more than several hundred, there is something wrong.
			if info.count < 300 {
				continue
			}
		}
		if mgr.saturatedCalls[call] {
			continue
		}
		mgr.saturatedCalls[call] = true
		log.Logf(0, "coverage for %v has saturated, not accepting more inputs", call)
	}
	// Don't minimize the persistent corpus until fuzzers have triaged all inputs from it.
	if mgr.phase < phaseTriagedCorpus {
		return
	}
	for key := range mgr.corpusDB.Records {
		_, ok1 := mgr.corpus[key]
		_, ok2 := mgr.disabledHashes[key]
		if !ok1 && !ok2 {
			mgr.corpusDB.Delete(key)
		}
	}
	mgr.corpusDB.BumpVersion(currentDBVersion)
}

type CallCov struct {
	count int
	cov   cover.Cover
}

func (mgr *Manager) collectSyscallInfo() map[string]*CallCov {
	mgr.mu.Lock()
	defer mgr.mu.Unlock()
	return mgr.collectSyscallInfoUnlocked()
}

func (mgr *Manager) collectSyscallInfoUnlocked() map[string]*CallCov {
	if mgr.checkResult == nil {
		return nil
	}
	calls := make(map[string]*CallCov)
	for _, call := range mgr.checkResult.EnabledCalls[mgr.cfg.Sandbox] {
		calls[mgr.target.Syscalls[call].Name] = new(CallCov)
	}
	for _, inp := range mgr.corpus {
		if calls[inp.Call] == nil {
			calls[inp.Call] = new(CallCov)
		}
		cc := calls[inp.Call]
		cc.count++
		cc.cov.Merge(inp.Cover)
	}
	return calls
}

func (mgr *Manager) fuzzerConnect() ([]rpctype.RPCInput, BugFrames) {
	mgr.mu.Lock()
	defer mgr.mu.Unlock()
	mgr.minimizeCorpus()
	corpus := make([]rpctype.RPCInput, 0, len(mgr.corpus))
	for _, inp := range mgr.corpus {
		corpus = append(corpus, inp)
	}
	memoryLeakFrames := make([]string, 0, len(mgr.memoryLeakFrames))
	for frame := range mgr.memoryLeakFrames {
		memoryLeakFrames = append(memoryLeakFrames, frame)
	}
	dataRaceFrames := make([]string, 0, len(mgr.dataRaceFrames))
	for frame := range mgr.dataRaceFrames {
		dataRaceFrames = append(dataRaceFrames, frame)
	}
	return corpus, BugFrames{memoryLeaks: memoryLeakFrames, dataRaces: dataRaceFrames}
}

func (mgr *Manager) machineChecked(a *rpctype.CheckArgs, enabledSyscalls map[*prog.Syscall]bool) {
	mgr.mu.Lock()
	defer mgr.mu.Unlock()
	if len(mgr.cfg.EnabledSyscalls) != 0 && len(a.DisabledCalls[mgr.cfg.Sandbox]) != 0 {
		disabled := make(map[string]string)
		for _, dc := range a.DisabledCalls[mgr.cfg.Sandbox] {
			disabled[mgr.target.Syscalls[dc.ID].Name] = dc.Reason
		}
		for _, id := range mgr.configEnabledSyscalls {
			name := mgr.target.Syscalls[id].Name
			if reason := disabled[name]; reason != "" {
				log.Logf(0, "disabling %v: %v", name, reason)
			}
		}
	}
	if a.Error != "" {
		log.Fatalf("machine check: %v", a.Error)
	}
	log.Logf(0, "machine check:")
	log.Logf(0, "%-24v: %v/%v", "syscalls", len(enabledSyscalls), len(mgr.target.Syscalls))
	for _, feat := range a.Features.Supported() {
		log.Logf(0, "%-24v: %v", feat.Name, feat.Reason)
	}
	mgr.checkResult = a
	mgr.targetEnabledSyscalls = enabledSyscalls
	mgr.loadCorpus()
	mgr.firstConnect = time.Now()
}

func (mgr *Manager) newInput(inp rpctype.RPCInput, sign signal.Signal) bool {
	mgr.mu.Lock()
	defer mgr.mu.Unlock()
	if mgr.saturatedCalls[inp.Call] {
		return false
	}
	sig := hash.String(inp.Prog)
	if old, ok := mgr.corpus[sig]; ok {
		// The input is already present, but possibly with different signal/coverage/call.
		sign.Merge(old.Signal.Deserialize())
		old.Signal = sign.Serialize()
		var cov cover.Cover
		cov.Merge(old.Cover)
		cov.Merge(inp.Cover)
		old.Cover = cov.Serialize()
		mgr.corpus[sig] = old
	} else {
		mgr.corpus[sig] = inp
		mgr.corpusDB.Save(sig, inp.Prog, 0)
		if err := mgr.corpusDB.Flush(); err != nil {
			log.Logf(0, "failed to save corpus database: %v", err)
		}
	}
	return true
}

func (mgr *Manager) candidateBatch(size int) []rpctype.RPCCandidate {
	mgr.mu.Lock()
	defer mgr.mu.Unlock()
	var res []rpctype.RPCCandidate
	for i := 0; i < size && len(mgr.candidates) > 0; i++ {
		last := len(mgr.candidates) - 1
		res = append(res, mgr.candidates[last])
		mgr.candidates[last] = rpctype.RPCCandidate{}
		mgr.candidates = mgr.candidates[:last]
	}
	if len(mgr.candidates) == 0 {
		mgr.candidates = nil
		if mgr.phase == phaseLoadedCorpus {
			if mgr.cfg.HubClient != "" {
				mgr.phase = phaseTriagedCorpus
				go mgr.hubSyncLoop()
			} else {
				mgr.phase = phaseTriagedHub
			}
		} else if mgr.phase == phaseQueriedHub {
			mgr.phase = phaseTriagedHub
		}
	}
	return res
}

func (mgr *Manager) rotateCorpus() bool {
	mgr.mu.Lock()
	defer mgr.mu.Unlock()
	return mgr.phase == phaseTriagedHub
}

func (mgr *Manager) collectUsedFiles() {
	if mgr.vmPool == nil {
		return
	}
	addUsedFile := func(f string) {
		if f == "" {
			return
		}
		stat, err := os.Stat(f)
		if err != nil {
			log.Fatalf("failed to stat %v: %v", f, err)
		}
		mgr.usedFiles[f] = stat.ModTime()
	}
	cfg := mgr.cfg
	...
collectUsedFiles
Using AI Code Generation
package main

import (
	"fmt"
	"os"
)

func main() {
	if len(os.Args) != 2 {
		fmt.Println("Please provide the directory path")
		os.Exit(1)
	}
	dirPath := os.Args[1]
	// collectUsedFiles is assumed to be defined elsewhere in the package;
	// a plausible implementation is sketched below.
	usedFiles := collectUsedFiles(dirPath)
	for _, usedFile := range usedFiles {
		fmt.Println(usedFile)
	}
}

// Variant: list unused files instead.
package main

import (
	"fmt"
	"os"
)

func main() {
	if len(os.Args) != 2 {
		fmt.Println("Please provide the directory path")
		os.Exit(1)
	}
	dirPath := os.Args[1]
	unusedFiles := findUnusedFiles(dirPath) // assumed helper
	for _, unusedFile := range unusedFiles {
		fmt.Println(unusedFile)
	}
}

// Variant: list unused images.
package main

import (
	"fmt"
	"os"
)

func main() {
	if len(os.Args) != 2 {
		fmt.Println("Please provide the directory path")
		os.Exit(1)
	}
	dirPath := os.Args[1]
	unusedImages := findUnusedImages(dirPath) // assumed helper
	for _, unusedImage := range unusedImages {
		fmt.Println(unusedImage)
	}
}
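The helpers these programs call (collectUsedFiles, findUnusedFiles, findUnusedImages) are never defined in the generated snippets. A hypothetical implementation of collectUsedFiles as a directory walker, so the first program compiles; it simply treats every regular file under dirPath as "used":

package main

import (
	"os"
	"path/filepath"
)

// collectUsedFiles walks dirPath and returns the paths of all regular
// files it finds (a hypothetical definition matching the usage above).
func collectUsedFiles(dirPath string) []string {
	var files []string
	filepath.Walk(dirPath, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err // propagate stat errors instead of silently skipping
		}
		if info.Mode().IsRegular() {
			files = append(files, path)
		}
		return nil
	})
	return files
}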
collectUsedFiles
Using AI Code Generation
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
)

func main() {
	wd, err := os.Getwd()
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	fmt.Println("working directory:", wd)
	// Resolve the directory containing this source file,
	// then walk up through its ancestors.
	_, filename, _, _ := runtime.Caller(0)
	dir := filepath.Dir(filename)
	parentDir := filepath.Dir(dir)
	grandParentDir := filepath.Dir(parentDir)
	greatGrandParentDir := filepath.Dir(grandParentDir)
	greatGreatGrandParentDir := filepath.Dir(greatGrandParentDir)
	greatGreatGreatGrandParentDir := filepath.Dir(greatGreatGrandParentDir)
	greatGreatGreatGreatGrandParentDir := filepath.Dir(greatGreatGreatGrandParentDir)
	greatGreatGreatGreatGreatGrandParentDir := filepath.Dir(greatGreatGreatGreatGrandParentDir)
	fmt.Println(greatGreatGreatGreatGreatGrandParentDir)
}
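The chain of ...GrandParentDir variables collapses naturally into a loop. A sketch that climbs the same number of levels with filepath.Dir, printing each ancestor along the way:

package main

import (
	"fmt"
	"path/filepath"
	"runtime"
)

func main() {
	// Resolve the directory of this source file, then walk up eight
	// levels (equivalent to the repeated filepath.Dir calls above).
	_, filename, _, ok := runtime.Caller(0)
	if !ok {
		panic("runtime.Caller failed")
	}
	dir := filepath.Dir(filename)
	for i := 0; i < 8; i++ {
		fmt.Println(dir)
		dir = filepath.Dir(dir)
	}
}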
collectUsedFiles
Using AI Code Generation
package main

import (
	"fmt"
	"os"
)

func main() {
	f, err := os.Open("file.txt")
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	defer f.Close()
	// newMain and collectUsedFiles are assumed helpers;
	// a hypothetical definition is sketched below.
	m := newMain()
	m.collectUsedFiles(f)
	fmt.Println(m.usedFiles)
}
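newMain and the usedFiles field are not shown in the generated snippet. A hypothetical definition that matches how the example uses them, reading one file path per line from the opened file:

package main

import (
	"bufio"
	"io"
)

// Main is a hypothetical receiver type backing newMain() above.
type Main struct {
	usedFiles []string
}

func newMain() *Main {
	return &Main{}
}

// collectUsedFiles reads one file path per line from r and records it.
func (m *Main) collectUsedFiles(r io.Reader) {
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		if line := scanner.Text(); line != "" {
			m.usedFiles = append(m.usedFiles, line)
		}
	}
}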
collectUsedFiles
Using AI Code Generation
package main

import (
	"fmt"

	"example.com/gol" // hypothetical import path; the generated snippet omitted it
)

func main() {
	fmt.Println(gol.CollectUsedFiles())
}
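The gol package is not shown anywhere; only the CollectUsedFiles signature is implied by the call above. A minimal hypothetical definition that would satisfy it, listing the non-directory entries of the current directory:

// Package gol is hypothetical; it exists only to make the example compile.
package gol

import "os"

// CollectUsedFiles returns the names of files in the current directory.
func CollectUsedFiles() []string {
	entries, err := os.ReadDir(".")
	if err != nil {
		return nil
	}
	var files []string
	for _, e := range entries {
		if !e.IsDir() {
			files = append(files, e.Name())
		}
	}
	return files
}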
collectUsedFiles
Using AI Code Generation
package main

import "fmt"

func main() {
	var files []string
	files = append(files, "1.txt", "2.txt", "3.txt")
	fmt.Println(files)
	// mainObj is an assumed package-level value;
	// a hypothetical definition is sketched below.
	usedFiles := mainObj.collectUsedFiles(files)
	fmt.Println(usedFiles)
}
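mainObj and its collectUsedFiles method are likewise undefined in the generated code. A hypothetical version that filters the given names down to files that actually exist on disk, which is one plausible reading of "used" here:

package main

import "os"

// mainType is a hypothetical type backing the mainObj value above.
type mainType struct{}

var mainObj mainType

// collectUsedFiles returns the subset of files that exist on disk.
func (mainType) collectUsedFiles(files []string) []string {
	var used []string
	for _, f := range files {
		if _, err := os.Stat(f); err == nil {
			used = append(used, f)
		}
	}
	return used
}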
collectUsedFiles
Using AI Code Generation
package main

import (
	"flag"
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

var (
	testFile   = flag.Bool("t", false, "test file")
	mainFile   = flag.Bool("m", false, "main file")
	pkgFile    = flag.Bool("p", false, "package file")
	goFile     = flag.Bool("g", false, "go file")
	ignoreFile = flag.Bool("i", false, "ignore file")
	ignoreDir  = flag.Bool("d", false, "ignore dir")
)

func main() {
	flag.Parse()
	args := flag.Args()
	if len(args) == 0 {
		fmt.Println("Please provide the directory path")
		os.Exit(1)
	}
	dir := args[0]
	result := collectUsedFiles(dir)
	for _, v := range result {
		fmt.Println(v)
	}
}

// collectUsedFiles walks dir and returns the files selected by the flags.
// The branch bodies were truncated in the generated original; the checks
// below are a plausible reconstruction of the intended filtering
// (the -p category was cut off entirely and is left unhandled).
func collectUsedFiles(dir string) []string {
	usedFiles := make(map[string]bool)
	filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() {
			if *ignoreDir && strings.HasPrefix(info.Name(), ".") {
				return filepath.SkipDir // skip hidden directories
			}
			return nil
		}
		if *ignoreFile && strings.HasPrefix(info.Name(), ".") {
			return nil // skip hidden files
		}
		if *goFile && strings.HasSuffix(path, ".go") {
			usedFiles[path] = true
		}
		if *testFile && strings.HasSuffix(path, "_test.go") {
			usedFiles[path] = true
		}
		if *mainFile && filepath.Base(path) == "main.go" {
			usedFiles[path] = true
		}
		return nil
	})
	var result []string
	for f := range usedFiles {
		result = append(result, f)
	}
	return result
}
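With the flags defined above, a hypothetical invocation such as go run main.go -g ./src would print every .go file under ./src, and adding -d would additionally skip hidden directories. Note that flag.Bool returns a non-nil pointer, so the original's ignoreDir != nil guards were redundant and are dropped in the reconstruction.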