Why do I get a slower frame rate while capturing the screen in OS X? - xcode

I am referring to the AVScreenShack example. I even got rid of the preview feature to see if it improves things, but the frame skipping is still very visible. I thought my computer was not powerful enough, but when I used QuickTime Player to record the screen, the video was silky smooth.
How can I improve the following code to get a high-fps output?
func startRecording() {
    mSession = AVCaptureSession()
    mSession?.sessionPreset = AVCaptureSessionPresetHigh
    let displayId: CGDirectDisplayID = CGMainDisplayID()
    let session: AVCaptureSession
    if mSession != nil {
        session = mSession!
    } else {
        return
    }
    let input: AVCaptureScreenInput = AVCaptureScreenInput(displayID: displayId)
    input.minFrameDuration = CMTimeMake(1, 35)
    if let rect = delegate?.cropRect() {
        input.cropRect = rect
    }
    if session.canAddInput(input) {
        session.addInput(input)
    } else {
        return
    }
    mMovieFileOutput = AVCaptureMovieFileOutput()
    if session.canAddOutput(mMovieFileOutput) {
        session.addOutput(mMovieFileOutput!)
    }
    mSession?.startRunning()
    mMovieFileOutput?.movieFragmentInterval = kCMTimeInvalid
    mMovieFileOutput?.startRecording(toOutputFileURL: URL(fileURLWithPath: "/Users/Tester/Desktop/capture.mov"), recordingDelegate: self)
}

You can get a higher framerate by lowering the minimum frame duration:
input.minFrameDuration = CMTimeMake(1, 60)
N.B. this doesn't guarantee a higher framerate, but it allows one.
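To confirm what frame rate you actually captured, you could read back the nominal frame rate of the recorded file's video track once recording finishes. A quick sketch (my addition, not part of the original answer; it assumes the output path from the question):

import AVFoundation

// Reads the nominal frame rate of the first video track in a recorded movie,
// e.g. the capture.mov written by the code above.
func recordedFrameRate(of url: URL) -> Float? {
    let asset = AVAsset(url: url)
    return asset.tracks(withMediaType: .video).first?.nominalFrameRate
}

// e.g. recordedFrameRate(of: URL(fileURLWithPath: "/Users/Tester/Desktop/capture.mov"))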

Related

Very slow framerate with AVFoundation and Metal in macOS

I'm trying to adapt Apple's AVCamFilter sample to macOS. The filtering appears to work, but rendering the processed image through Metal gives me a framerate of several seconds per frame. I've tried different approaches, but have been stuck for a long time.
This is the project AVCamFilterMacOS - can anyone with better knowledge of AVFoundation and Metal tell me what's wrong? I've been reading the documentation and practicing getting the unprocessed image to display, as well as rendering other things like models to the Metal view, but I can't seem to get the processed CMSampleBuffer to render at a reasonable framerate.
Even if I skip the renderer and send the videoPixelBuffer to the Metal view directly, the view's performance is pretty jittery.
Here is some of the relevant rendering code I'm using in the controller:
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    processVideo(sampleBuffer: sampleBuffer)
}

func processVideo(sampleBuffer: CMSampleBuffer) {
    if !renderingEnabled {
        return
    }
    guard let videoPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer),
          let formatDescription = CMSampleBufferGetFormatDescription(sampleBuffer) else {
        return
    }
    if !self.videoFilter.isPrepared {
        /*
         outputRetainedBufferCountHint is the number of pixel buffers the renderer retains. This value informs the renderer
         how to size its buffer pool and how many pixel buffers to preallocate. Allow 3 frames of latency to cover the dispatch_async call.
         */
        self.videoFilter.prepare(with: formatDescription, outputRetainedBufferCountHint: 3)
    }
    // Send the pixel buffer through the filter
    guard let filteredBuffer = self.videoFilter.render(pixelBuffer: videoPixelBuffer) else {
        print("Unable to filter video buffer")
        return
    }
    self.previewView.pixelBuffer = filteredBuffer
}
And from the renderer:
func render(pixelBuffer: CVPixelBuffer) -> CVPixelBuffer? {
    if !isPrepared {
        assertionFailure("Invalid state: Not prepared.")
        return nil
    }
    var newPixelBuffer: CVPixelBuffer?
    CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, outputPixelBufferPool!, &newPixelBuffer)
    guard let outputPixelBuffer = newPixelBuffer else {
        print("Allocation failure: Could not get pixel buffer from pool. (\(self.description))")
        return nil
    }
    guard let inputTexture = makeTextureFromCVPixelBuffer(pixelBuffer: pixelBuffer, textureFormat: .bgra8Unorm),
          let outputTexture = makeTextureFromCVPixelBuffer(pixelBuffer: outputPixelBuffer, textureFormat: .bgra8Unorm) else {
        return nil
    }
    // Set up command queue, buffer, and encoder.
    guard let commandQueue = commandQueue,
          let commandBuffer = commandQueue.makeCommandBuffer(),
          let commandEncoder = commandBuffer.makeComputeCommandEncoder() else {
        print("Failed to create a Metal command queue.")
        CVMetalTextureCacheFlush(textureCache!, 0)
        return nil
    }
    commandEncoder.label = "Rosy Metal"
    commandEncoder.setComputePipelineState(computePipelineState!)
    commandEncoder.setTexture(inputTexture, index: 0)
    commandEncoder.setTexture(outputTexture, index: 1)
    // Set up the thread groups.
    let width = computePipelineState!.threadExecutionWidth
    let height = computePipelineState!.maxTotalThreadsPerThreadgroup / width
    let threadsPerThreadgroup = MTLSizeMake(width, height, 1)
    let threadgroupsPerGrid = MTLSize(width: (inputTexture.width + width - 1) / width,
                                      height: (inputTexture.height + height - 1) / height,
                                      depth: 1)
    commandEncoder.dispatchThreadgroups(threadgroupsPerGrid, threadsPerThreadgroup: threadsPerThreadgroup)
    commandEncoder.endEncoding()
    commandBuffer.commit()
    return outputPixelBuffer
}
func makeTextureFromCVPixelBuffer(pixelBuffer: CVPixelBuffer, textureFormat: MTLPixelFormat) -> MTLTexture? {
    let width = CVPixelBufferGetWidth(pixelBuffer)
    let height = CVPixelBufferGetHeight(pixelBuffer)
    // Create a Metal texture from the image buffer.
    var cvTextureOut: CVMetalTexture?
    CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault, textureCache, pixelBuffer, nil, textureFormat, width, height, 0, &cvTextureOut)
    guard let cvTexture = cvTextureOut, let texture = CVMetalTextureGetTexture(cvTexture) else {
        CVMetalTextureCacheFlush(textureCache, 0)
        return nil
    }
    return texture
}
And finally the metal view:
override func draw(_ rect: CGRect) {
    var pixelBuffer: CVPixelBuffer?
    var mirroring = false
    var rotation: Rotation = .rotate0Degrees
    syncQueue.sync {
        pixelBuffer = internalPixelBuffer
        mirroring = internalMirroring
        rotation = internalRotation
    }
    guard let drawable = currentDrawable,
          let currentRenderPassDescriptor = currentRenderPassDescriptor,
          let previewPixelBuffer = pixelBuffer else {
        return
    }
    // Create a Metal texture from the image buffer.
    let width = CVPixelBufferGetWidth(previewPixelBuffer)
    let height = CVPixelBufferGetHeight(previewPixelBuffer)
    if textureCache == nil {
        createTextureCache()
    }
    var cvTextureOut: CVMetalTexture?
    CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault,
                                              textureCache!,
                                              previewPixelBuffer,
                                              nil,
                                              .bgra8Unorm,
                                              width,
                                              height,
                                              0,
                                              &cvTextureOut)
    guard let cvTexture = cvTextureOut, let texture = CVMetalTextureGetTexture(cvTexture) else {
        print("Failed to create preview texture")
        CVMetalTextureCacheFlush(textureCache!, 0)
        return
    }
    if texture.width != textureWidth ||
        texture.height != textureHeight ||
        self.bounds != internalBounds ||
        mirroring != textureMirroring ||
        rotation != textureRotation {
        setupTransform(width: texture.width, height: texture.height, mirroring: mirroring, rotation: rotation)
    }
    // Set up command buffer and encoder
    guard let commandQueue = commandQueue else {
        print("Failed to create Metal command queue")
        CVMetalTextureCacheFlush(textureCache!, 0)
        return
    }
    guard let commandBuffer = commandQueue.makeCommandBuffer() else {
        print("Failed to create Metal command buffer")
        CVMetalTextureCacheFlush(textureCache!, 0)
        return
    }
    guard let commandEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: currentRenderPassDescriptor) else {
        print("Failed to create Metal command encoder")
        CVMetalTextureCacheFlush(textureCache!, 0)
        return
    }
    commandEncoder.label = "Preview display"
    commandEncoder.setRenderPipelineState(renderPipelineState!)
    commandEncoder.setVertexBuffer(vertexCoordBuffer, offset: 0, index: 0)
    commandEncoder.setVertexBuffer(textCoordBuffer, offset: 0, index: 1)
    commandEncoder.setFragmentTexture(texture, index: 0)
    commandEncoder.setFragmentSamplerState(sampler, index: 0)
    commandEncoder.drawPrimitives(type: .triangleStrip, vertexStart: 0, vertexCount: 4)
    commandEncoder.endEncoding()
    // Draw to the screen.
    commandBuffer.present(drawable)
    commandBuffer.commit()
}
All of this code is in the linked project.
Capture device delegates don't own the sample buffers they receive in their callbacks, so it's incumbent on the receiver to make sure they're retained for as long as their contents are needed. This project doesn't currently ensure that.
Rather, by calling CMSampleBufferGetImageBuffer and wrapping the resulting pixel buffer in a texture, the view controller is allowing the sample buffer to be released, meaning that future operations on its corresponding pixel buffer are undefined.
One way to ensure the sample buffer lives long enough to be processed is to add a private member to the camera view controller class that retains the most-recently received sample buffer:
private var sampleBuffer: CMSampleBuffer!
and then set this member in the captureOutput(...) method before calling processVideo. You don't even have to reference it further; the fact that it's retained should prevent the stuttery and unpredictable behavior you're seeing.
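A minimal sketch of that change, reusing the declaration above and the captureOutput(...) signature from the question:

// Retains the most recently delivered sample buffer so its pixel buffer
// stays valid while the filter and the Metal view read from it.
private var sampleBuffer: CMSampleBuffer!

func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    self.sampleBuffer = sampleBuffer // keep it alive until the next frame replaces it
    processVideo(sampleBuffer: sampleBuffer)
}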
This solution may not be perfect, since it retains the sample buffer for longer than strictly necessary in the event of a capture session interruption or other pause. You can devise your own scheme for managing object lifetimes; the important thing is to ensure that the root sample buffer object sticks around until you're done with any textures that refer to its contents.

Is it possible to feed MediaStream frames at a lower framerate?

I would like to use the MediaStream.captureStream() method, but it is either rendered useless by the specification and bugs, or I am using it totally wrong.
I know that captureStream() takes a maximum framerate as its parameter, not a constant one, and does not even guarantee that. It is, however, possible to change the MediaStream's currentTime (currently in Chrome; in Firefox it has no effect, though Firefox offers requestFrame instead, which is not available in Chrome), and the idea was that manual frame requests, or setting the position of the frame within the MediaStream, should override this. It doesn't.
In Firefox it smoothly renders the video, frame by frame, but the resulting video is as long as the wall-clock time used for processing.
In Chrome there are some dubious black frames or reordered ones (currently I do not care about that until the FPS matches), and manually setting currentTime achieves nothing - the same result as in Firefox.
I use modified code from the MediaStream Capture Canvas and Audio Simultaneously answer.
const FPS = 30;
var cStream, vid, recorder, chunks = [], go = true,
    Q = 61, rec = document.getElementById('rec'),
    canvas = document.getElementById('canvas'),
    ctx = canvas.getContext('2d');
ctx.strokeStyle = 'rgb(255, 0, 0)';

function clickHandler() {
  this.textContent = 'stop recording';
  // it has no effect no matter if it is empty or set to 30
  cStream = canvas.captureStream(FPS);
  recorder = new MediaRecorder(cStream);
  recorder.ondataavailable = saveChunks;
  recorder.onstop = exportStream;
  this.onclick = stopRecording;
  recorder.start();
  draw();
}

function exportStream(e) {
  if (chunks.length) {
    var blob = new Blob(chunks);
    var vidURL = URL.createObjectURL(blob);
    var vid2 = document.createElement('video');
    vid2.controls = true;
    vid2.src = vidURL;
    vid2.onended = function() { // note: the event is "ended", so the handler is onended
      URL.revokeObjectURL(vidURL);
    };
    document.body.insertBefore(vid2, vid);
  } else {
    document.body.insertBefore(document.createTextNode('no data saved'), canvas);
  }
}

function saveChunks(e) {
  e.data.size && chunks.push(e.data);
}

function stopRecording() {
  go = false;
  this.parentNode.removeChild(this);
  recorder.stop();
}

var loadVideo = function() {
  vid = document.createElement('video');
  document.body.insertBefore(vid, canvas);
  vid.oncanplay = function() {
    rec.onclick = clickHandler;
    rec.disabled = false;
    canvas.width = vid.videoWidth;
    canvas.height = vid.videoHeight;
    vid.oncanplay = null;
    ctx.drawImage(vid, 0, 0);
  };
  vid.onseeked = function() {
    ctx.drawImage(vid, 0, 0);
    /*
      Here I want to include additional drawing per each frame,
      for sure taking more than 180ms
    */
    if (cStream && cStream.requestFrame) cStream.requestFrame();
    draw();
  };
  vid.crossOrigin = 'anonymous';
  vid.src = 'https://dl.dropboxusercontent.com/s/bch2j17v6ny4ako/movie720p.mp4';
  vid.currentTime = 0;
};

function draw() {
  if (go && cStream) {
    ++Q;
    cStream.currentTime = Q / FPS;
    vid.currentTime = Q / FPS;
  }
}

loadVideo();

<button id="rec" disabled>record</button><br>
<canvas id="canvas" width="500" height="500"></canvas>
Is there a way to make it operational?
The goal is to load a video, process every frame (which is time-consuming in my case) and return the processed one.
Footnote: I do not want to use ffmpeg.js, an external server, or other technologies. I can process the video with classic ffmpeg without using JavaScript at all, but that is not the point of this question; it is more about MediaStream usability/maturity. The context here is Firefox/Chrome, but it could be node.js or nw.js as well. If this is possible at all, or merely awaiting bug fixes, the next question would be feeding audio into it, but I think that would be better as a separate question.

How to “crop” SCNAnimationPlayer to a specific start and end time, iOS 11

I have a .dae model with a long animation. The animation includes segments of walking, running, hitting, death, etc. I know the frame numbers for the start and end of each segment, and I also know the frames-per-second rate, so getting the start and end times of each segment is pretty easy.
I can get the full animation as an SCNAnimationPlayer object. What I've been experimenting with is making a copy of the full animation and then setting the timeOffset and duration of the animation.
let walkPlayer = fullPlayer.copy() as! SCNAnimationPlayer
walkPlayer.stop()
walkPlayer.animation.timeOffset = walk.offset
walkPlayer.animation.duration = walk.duration
I then add the walkPlayer back to the Bip01 node (where I got the full animation from).
I can play the walk easily enough by calling animationPlayer(forKey:"walk")?.play()
I can change the duration and other aspects of the animation easily enough. But the animation always starts at frame 0. Whatever value I put into .timeOffset, it just gets ignored.
How can I play from a start frame to an end frame of an SCNAnimation found in SCNAnimationPlayer?
The key bit was to find
CAAnimation(scnAnimation: animation)
and
SCNAnimation(caAnimation: animation)
Once I found these, I could use a CAAnimationGroup to “crop” the full animation.
Here’s my Troll.swift that I was working on. There is, of course, much to do, but now I can at least make the poor beast walk and die.
class Troll: SCNNode {
    var body: SCNNode!

    static func timeRange(forStartingAtFrame start: Int, endingAtFrame end: Int, fps: Double = 30) -> (offset: TimeInterval, duration: TimeInterval) {
        let startTime = self.time(atFrame: start, fps: fps)
        let endTime = self.time(atFrame: end, fps: fps)
        return (offset: startTime, duration: endTime - startTime)
    }

    static func time(atFrame frame: Int, fps: Double = 30) -> TimeInterval {
        return TimeInterval(frame) / fps
    }

    static func animation(from full: CAAnimation, startingAtFrame start: Int, endingAtFrame end: Int, fps: Double = 30) -> CAAnimation {
        let range = self.timeRange(forStartingAtFrame: start, endingAtFrame: end, fps: fps)
        let animation = CAAnimationGroup()
        let sub = full.copy() as! CAAnimation
        sub.timeOffset = range.offset
        animation.animations = [sub]
        animation.duration = range.duration
        return animation
    }

    func load() {
        guard let trollScene = SCNScene(named: "Models.scnassets/troll/troll.dae") else {
            fatalError("Can't load the scene")
        }
        guard let troll_body = trollScene.rootNode.childNode(withName: "troll", recursively: true) else {
            fatalError("found no troll")
        }
        guard let troll_weapon = trollScene.rootNode.childNode(withName: "troll_weapon", recursively: true) else {
            fatalError("found no troll_weapon")
        }
        guard let troll_bracelet = trollScene.rootNode.childNode(withName: "troll_bracelet", recursively: true) else {
            fatalError("found no troll_bracelet")
        }
        guard let bips = trollScene.rootNode.childNode(withName: "Bip01", recursively: true) else {
            fatalError("found no Bip01")
        }
        guard let fullKey = bips.animationKeys.first else {
            fatalError("Bip01 got no animation")
        }
        guard let fullPlayer = bips.animationPlayer(forKey: fullKey) else {
            fatalError("Bip01 got no player for \(fullKey)")
        }
        let fullAnimation = CAAnimation(scnAnimation: fullPlayer.animation)

        self.addChildNode(troll_body)
        self.addChildNode(troll_weapon)
        self.addChildNode(troll_bracelet)
        self.addChildNode(bips)
        self.body = bips
        self.body.removeAllAnimations()

        let walkAnimation = Troll.animation(from: fullAnimation, startingAtFrame: 10, endingAtFrame: 60)
        walkAnimation.repeatCount = .greatestFiniteMagnitude
        walkAnimation.fadeInDuration = 0.3
        walkAnimation.fadeOutDuration = 0.3
        let walkPlayer = SCNAnimationPlayer(animation: SCNAnimation(caAnimation: walkAnimation))
        self.body.addAnimationPlayer(walkPlayer, forKey: "walk")

        let deathAnimation = Troll.animation(from: fullAnimation, startingAtFrame: 1810, endingAtFrame: 1850)
        deathAnimation.isRemovedOnCompletion = false
        deathAnimation.fadeInDuration = 0.3
        deathAnimation.fadeOutDuration = 0.3
        let deathPlayer = SCNAnimationPlayer(animation: SCNAnimation(caAnimation: deathAnimation))
        self.body.addAnimationPlayer(deathPlayer, forKey: "death")

        self.scale = SCNVector3(0.1, 0.1, 0.1)
    }

    func walk() {
        print("+++ walk +++")
        self.body.animationPlayer(forKey: "walk")?.play()
    }

    func death() {
        print("+++ death +++")
        self.body.animationPlayer(forKey: "walk")?.stop(withBlendOutDuration: 0.3)
        self.body.animationPlayer(forKey: "death")?.play()
    }
}
This is for anyone who wants to stop an animation on one specific frame. (By the way, it is pretty weird that SCNAnimationPlayer.animation doesn't support setting timeOffset.)
+ (SCNAnimationPlayer *)animationPlayer:(SCNAnimationPlayer *)animPlayer onTimeOffset:(CGFloat)timeOffset {
    SCNAnimation *anim = animPlayer.animation;
    CAAnimation *caAnim = [CAAnimation animationWithSCNAnimation:anim];
    caAnim.timeOffset = timeOffset * caAnim.duration;
    caAnim.speed = 0;
    caAnim.usesSceneTimeBase = NO;
    anim = [SCNAnimation animationWithCAAnimation:caAnim];
    animPlayer = [SCNAnimationPlayer animationPlayerWithAnimation:anim];
    return animPlayer;
}
If you want to stop a skeleton animation at the halfway point of its progress, pass 0.5 as the timeOffset:

SCNAnimationPlayer *animPlayer = [SCNAnimationPlayer animationPlayer:[node animationPlayerForKey:key] onTimeOffset:0.5];
[node addAnimationPlayer:animPlayer forKey:key];
[animPlayer play];
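For reference, here is the same helper sketched in Swift, a direct (untested) translation of the Objective-C above:

func animationPlayer(_ animPlayer: SCNAnimationPlayer, onTimeOffset timeOffset: CGFloat) -> SCNAnimationPlayer {
    let caAnim = CAAnimation(scnAnimation: animPlayer.animation)
    caAnim.timeOffset = Double(timeOffset) * caAnim.duration // offset as a fraction of the full duration
    caAnim.speed = 0                                         // freeze on that frame
    caAnim.usesSceneTimeBase = false
    return SCNAnimationPlayer(animation: SCNAnimation(caAnimation: caAnim))
}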

Xamarin.Android Record Video - Quality Poor

I'm using the following Xamarin tutorial: https://developer.xamarin.com/recipes/android/media/video/record_video/
I can successfully record video and audio, but the quality is not very good. Can anyone suggest or explain how I can increase the quality, please?
I know the device can record in higher quality, because the native camera app records in much higher quality.
EDIT: here is my code so far
protected override void OnCreate(Bundle savedInstanceState)
{
    base.OnCreate(savedInstanceState);

    // Set our view from the "main" layout resource
    SetContentView(Resource.Layout.RecordVideo);

    var record = FindViewById<Button>(Resource.Id.Record);
    var stop = FindViewById<Button>(Resource.Id.Stop);
    var play = FindViewById<Button>(Resource.Id.Play);
    var video = FindViewById<VideoView>(Resource.Id.SampleVideoView);
    var videoPlayback = FindViewById<VideoView>(Resource.Id.PlaybackVideoView);

    string path = Android.OS.Environment.ExternalStorageDirectory.AbsolutePath + "/test.mp4";

    if (Camera.NumberOfCameras < 2)
    {
        Toast.MakeText(this, "Front camera missing", ToastLength.Long).Show();
        return;
    }

    video.Visibility = ViewStates.Visible;
    videoPlayback.Visibility = ViewStates.Gone;

    _camera = Camera.Open(1);
    _camera.SetDisplayOrientation(90);
    _camera.Unlock();

    recorder = new MediaRecorder();
    recorder.SetCamera(_camera);
    recorder.SetAudioSource(AudioSource.Mic);
    recorder.SetVideoSource(VideoSource.Camera);
    recorder.SetOutputFormat(OutputFormat.Default);
    recorder.SetAudioEncoder(AudioEncoder.Default);
    recorder.SetVideoEncoder(VideoEncoder.Default);
    //var cameraProfile = CamcorderProfile.Get(CamcorderQuality.HighSpeed1080p);
    //recorder.SetProfile(cameraProfile);
    recorder.SetOutputFile(path);
    recorder.SetOrientationHint(270);
    recorder.SetPreviewDisplay(video.Holder.Surface);

    record.Click += delegate
    {
        recorder.Prepare();
        recorder.Start();
    };

    stop.Click += delegate
    {
        if (recorder != null)
        {
            video.Visibility = ViewStates.Gone;
            videoPlayback.Visibility = ViewStates.Visible;
            recorder.Stop();
            recorder.Release();
        }
    };

    play.Click += delegate
    {
        video.Visibility = ViewStates.Gone;
        videoPlayback.Visibility = ViewStates.Visible;
        var uri = Android.Net.Uri.Parse(path);
        videoPlayback.SetVideoURI(uri);
        videoPlayback.Start();
    };
}
I don't see the example specifying a CamcorderProfile anywhere, so you might want to start with that. It's possible that the default framerate, bitrate, and video frame size are lower than you'd expect. I'm not at a computer right now, but try setting the profile to, for example, QUALITY_1080P using MediaRecorder's SetProfile method.
You need to set the profile after setting the video and audio sources, but before calling the SetOutputFile method.
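Something like this (a sketch, untested; it reuses the bindings already visible in the question's commented-out lines, CamcorderProfile.Get and SetProfile):

recorder = new MediaRecorder();
recorder.SetCamera(_camera);
recorder.SetAudioSource(AudioSource.Mic);
recorder.SetVideoSource(VideoSource.Camera);

// The profile bundles output format, codecs, frame size, frame rate, and bitrates,
// so it replaces the SetOutputFormat/SetAudioEncoder/SetVideoEncoder calls.
// Pick a quality your device supports, e.g. the generic High preset (or a 1080p one).
var profile = CamcorderProfile.Get(CamcorderQuality.High);
recorder.SetProfile(profile);

recorder.SetOutputFile(path);
recorder.SetOrientationHint(270);
recorder.SetPreviewDisplay(video.Holder.Surface);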

Trying to create a very basic game, but experiencing some bottlenecking issues (I think)!

I'm using Adobe Flash Professional CS6 to create the game. I'll post the code below. Note that there are two symbols I've created in Flash that are not made in code: the Crosshair symbol and the Hitbox symbol. Basically, the objective of the game is to click the Hitbox symbol. My issue is that I am experiencing what seems to be a bottlenecking issue: when I click the Hitbox symbol many times with a fast timer, the score doesn't register. I am presuming this comes from a (maybe) inefficient movement algorithm, but I can't really seem to find room for improvement. Some help would be appreciated.
Note that I had to change the timer from Timer(1) to Timer(30). This made the bottlenecking issue a little better, but made the game less fluid.
Ah, and the reason I am using the directionCheckerY and directionCheckerX variables is that later in development I will add random movement: a random timer will change these to either 0 or 1, creating random movement.
import flash.events.MouseEvent;
import flash.events.TimerEvent;

// Variables
var directionCheckerX:int = 0;
var directionCheckerY:int = 0;
var pointChecker:int = 0;

// Crosshair
var crosshair:Crosshair = new Crosshair();
addChild(crosshair);
Mouse.hide();

function moveCrossEvent(evt:MouseEvent) {
    crosshair.x = mouseX;
    crosshair.y = mouseY;
    evt.updateAfterEvent();
}

// Hitbox
var hitbox:Hitbox = new Hitbox();
addChild(hitbox);
hitbox.x = 50;
hitbox.y = 50;

// Timer
var myTimer:Timer = new Timer(30);
myTimer.addEventListener(TimerEvent.TIMER, timerEvent);
myTimer.start();

function timerEvent(evt:TimerEvent) {
    // Border code (keeps the Hitbox from going out of bounds)
    if (hitbox.x <= 0) {
        directionCheckerX = 1;
    } else if (hitbox.x >= 550) {
        directionCheckerX = 0;
    }
    if (directionCheckerX == 0) {
        hitbox.x -= 2;
    } else {
        hitbox.x += 2;
    }
    if (hitbox.y <= 0) {
        directionCheckerY = 1;
    } else if (hitbox.y >= 400) {
        directionCheckerY = 0;
    }
    if (directionCheckerY == 0) {
        hitbox.y -= 2;
    } else {
        hitbox.y += 2;
    }
}

// EventListeners
stage.addEventListener(MouseEvent.MOUSE_MOVE, moveCrossEvent);
hitbox.addEventListener(MouseEvent.CLICK, hitboxEvent);
stage.addEventListener(MouseEvent.CLICK, stageEvent);

function hitboxEvent(evt:MouseEvent) {
    pointChecker += 1;
    outputTxt.text = String(pointChecker);
    evt.stopImmediatePropagation();
    //evt.updateAfterEvent();
}

function stageEvent(evt:MouseEvent) {
    pointChecker -= 1;
    outputTxt.text = String(pointChecker);
}
To be clear, I'm not a game developer.
Actually, sometimes there is no big difference between a Timer with a 1 millisecond interval and one with a 30 millisecond interval, because it depends on the SWF file's framerate and the runtime environment. But here, what about using an Event.ENTER_FRAME event instead of a Timer? As Adobe said here about timers versus ENTER_FRAME events:
Choose either timers or ENTER_FRAME events, depending on whether content is animated.
Timers are preferred over Event.ENTER_FRAME events for non-animated content that executes for a long time.
and in your case the content is animated (even if your game is still basic).
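A minimal sketch of that swap (ENTER_FRAME fires once per rendered frame, so the movement code from your timerEvent can go straight into the handler; moveHitbox() is just a placeholder name for that logic):

import flash.events.Event;

// Drive the hitbox from the frame loop instead of a Timer,
// so movement and rendering stay in sync.
stage.addEventListener(Event.ENTER_FRAME, enterFrameHandler);

function enterFrameHandler(evt:Event):void {
    moveHitbox(); // the same border/direction logic as in timerEvent
}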
Then you can use a var to set the speed of your hitbox, which you can update at any time:

var speed:int = 2;

function timerEvent(evt:TimerEvent):void
{
    // ...
    if (directionCheckerX == 0) {
        hitbox.x -= speed;
    } else {
        hitbox.x += speed;
    }
    // ...
}
Hope that can help.
